vm_pageout.c revision 32585
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.107 1998/01/12 01:44:44 dyson Exp $
 */

/*
 *	The proverbial page-out daemon.
 */
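/*
 * Overview (descriptive note): this file provides two kernel threads.
 * The "pagedaemon" thread (vm_pageout) maintains the free, cache,
 * inactive, and active page queues, laundering dirty pages as needed.
 * Unless the kernel is built with NO_SWAPPING, a second "vmdaemon"
 * thread (vm_daemon) enforces RSS limits and performs whole-process
 * swapouts.
 */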
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif

int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
int vm_pageout_full_stats_interval = 0;
int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
int defer_swap_pageouts=0;
int disable_swap_pageouts=0;

int max_page_launder=100;
#if defined(NO_SWAPPING)
int vm_swap_enabled=0;
int vm_swap_idle_enabled=0;
#else
int vm_swap_enabled=1;
int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "");
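/*
 * The writable knobs above are exported as sysctl variables under the
 * "vm" node and may be tuned at runtime from userland, for example
 * (illustrative value only):
 *
 *	sysctl -w vm.max_page_launder=64
 */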
#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);
void pmap_collect(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
static int
vm_pageout_clean(m, sync)
	vm_page_t m;
	int sync;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((sync != VM_PAGEOUT_FORCE) &&
	    (object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((!sync && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (!sync && object->backing_object) {
		vm_object_collapse(object);
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	for (i = page_base; i < (page_base + pageout_count); i++) {
		mc[i]->flags |= PG_BUSY;
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}
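/*
 * Layout of the mc[] cluster built above (descriptive note): the array
 * is 2*vm_pageout_page_count entries wide and the target page is planted
 * in the middle slot, mc[vm_pageout_page_count].  The forward scan fills
 * slots vm_pageout_page_count+i and the backward scan fills slots
 * vm_pageout_page_count-i, decrementing page_base for each backward hit,
 * so the finished cluster is the contiguous run
 * mc[page_base .. page_base+pageout_count-1], handed to
 * vm_pageout_flush() in ascending pindex order.
 */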
int
vm_pageout_flush(mc, count, sync)
	vm_page_t *mc;
	int count;
	int sync;
{
	register vm_object_t object;
	int pageout_status[count];
	int anyok = 0;
	int i;

	object = mc[0]->object;
	object->paging_in_progress += count;

	vm_pager_put_pages(object, mc, count,
	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			anyok++;
			break;
		case VM_PAGER_PEND:
			anyok++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			if (mt->queue == PQ_INACTIVE)
				vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			PAGE_WAKEUP(mt);
		}
	}
	return anyok;
}
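/*
 * Note on vm_pageout_flush() semantics: the return value counts pages
 * accepted by the pager (VM_PAGER_OK or VM_PAGER_PEND).  Pages that
 * completed synchronously are unbusied above; pages still pending
 * asynchronous I/O remain busy with paging_in_progress elevated, and the
 * pager's completion path is responsible for the matching
 * vm_object_pip_wakeup()/PAGE_WAKEUP().
 */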
#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				p->flags |= PG_REFERENCED;
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				p->flags &= ~PG_REFERENCED;
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					p->flags &= ~PG_REFERENCED;
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	vm_map_reference(map);
	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		vm_map_deallocate(map);
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			break;
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	vm_map_deallocate(map);
	return;
}
#endif
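/*
 * vm_pageout_page_free:
 *
 * Free a page, taking a transient reference on its object (and, for
 * OBJT_VNODE objects, on the underlying vnode) so that neither can be
 * torn down while the page is being freed.
 */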
void
vm_pageout_page_free(vm_page_t m) {
	struct vnode *vp;
	vm_object_t object;

	object = m->object;
	vp = NULL;

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vp->v_usecount++;
		if (VSHOULDBUSY(vp))
			vbusy(vp);
	}
	m->flags |= PG_BUSY;
	vm_page_protect(m, VM_PROT_NONE);
	PAGE_WAKEUP(m);
	vm_page_free(m);
	vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, addl_page_shortage, maxscan, pcount;
	int maxlaunder;
	int pages_freed;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	/*
	 * Start scanning the inactive queue for pages we can free.  We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	pages_freed = 0;
	addl_page_shortage = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;
	maxlaunder = (cnt.v_inactive_target > max_page_launder) ?
	    max_page_launder : cnt.v_inactive_target;
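	/*
	 * The rescan0 label below implements the restart protocol for the
	 * inactive queue scan: operations inside the loop can block, and a
	 * page may change queues while we are blocked, in which case the
	 * scan restarts from the head of the inactive queue rather than
	 * chase a stale "next" pointer.
	 */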
rescan0:
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queue_inactive);

	    (m != NULL) && (maxscan-- > 0) &&
	    ((cnt.v_cache_count + cnt.v_free_count) <
	     (cnt.v_cache_min + cnt.v_free_target));

	    m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given that the
		 * upper level VM system does not know anything about
		 * existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			m->flags &= ~PG_REFERENCED;
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else if (m->dirty != 0) {
			m->dirty = VM_PAGE_BITS_ALL;
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			pages_freed++;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			pages_freed++;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			if (object->type == OBJT_VNODE && (object->flags & OBJ_DEAD) == 0) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget() above; we don't move the
				 * page back onto the end of the queue, which
				 * keeps the statistics more correct.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}
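	/*
	 * The inactive scan is complete.  At this point pages_freed counts
	 * pages freed or moved to the cache queue above, addl_page_shortage
	 * counts inactive pages that were skipped because they were busy or
	 * held, and vnodes_skipped counts dirty vnode-backed pages whose
	 * vnode could not be locked (or that changed state while we tried);
	 * all three feed the shortage calculations and wakeups below.
	 */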
	/*
	 * Compute the page shortage.  If we are still very low on memory,
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */
	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}

	/*
	 * If the "inactive" loop finds that there is a shortage over and
	 * above the page statistics variables, then we need to accommodate
	 * that.  This avoids potential deadlocks due to pages being
	 * temporarily busy for I/O or other types of temporary wiring.
	 */
	if (addl_page_shortage) {
		if (page_shortage < 0)
			page_shortage = 0;
		page_shortage += addl_page_shortage;
	}

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		m->flags &= ~PG_REFERENCED;

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}
	s = splvm();
	/*
	 * We try to maintain some *really* free pages; this allows
	 * interrupt code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover);
		if (!m)
			break;
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time.tv_sec != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time.tv_sec;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * Make sure that we have swap space -- if we are low on memory and
	 * swap, then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}
/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods of time when there is no paging, some statistics
 * accumulation still occurs.  This code helps the situation where paging
 * just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			m->flags &= ~PG_REFERENCED;
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}
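/*
 * Hypothetical worked example for the calculation above (illustrative
 * only; assumes 4K pages, 64K MAXBSIZE, v_interrupt_free_min = 2, and a
 * machine with 8192 pages of physical memory):
 *
 *	v_free_min         = 4 + (8192 - 1024) / 200	= 39
 *	v_pageout_free_min = (2 * 65536) / 4096 + 2	= 34
 *	v_free_reserved    = 8 + 34 + 8192 / 768 + PQ_L2_SIZE
 *	                   = 52 + PQ_L2_SIZE
 *	v_free_min        += v_free_reserved
 *
 * The exact figures depend on the architecture and on the PQ_*CACHE
 * (page coloring) configuration, which determines PQ_L2_SIZE.
 */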
/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 4;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 25;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int inactive_target;
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
		}
		inactive_target =
		    (cnt.v_page_count - cnt.v_wire_count) / 4;
		if (inactive_target < 2*cnt.v_free_min)
			inactive_target = 2*cnt.v_free_min;
		cnt.v_inactive_target = inactive_target;
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}
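/*
 * Note that vm_req_vmdaemon() above rate-limits wakeups of vm_daemon to
 * roughly one per second: "ticks > lastrun + hz" enforces the interval,
 * while "ticks < lastrun" re-arms the test if the tick counter wraps.
 */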
static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_pindex_t)(limit >> PAGE_SHIFT));
			}
		}
	}
}
#endif