vm_pageout.c revision 31542
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.100 1997/10/25 02:41:56 dyson Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
int vm_pageout_full_stats_interval = 0;
int vm_pageout_stats_free_max = 0, vm_pageout_algorithm_lru = 0;
int defer_swap_pageouts = 0;
int disable_swap_pageouts = 0;
#if defined(NO_SWAPPING)
int vm_swapping_enabled = 0;
#else
int vm_swapping_enabled = 1;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
	CTLFLAG_RD, &vm_swapping_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
	CTLFLAG_RW, &vm_swapping_enabled, 0, "");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swap_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "");

SYSCTL_INT(_vm, OID_AUTO, disable_swap_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "");

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)
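/*
 * Illustrative arithmetic for the threshold above (assuming 4K pages, not
 * stated in this file): 1800 pages is roughly 7MB of RAM, so a machine with
 * more than about 7MB of memory launders at most 32 dirty pages per pageout
 * pass, while a smaller machine is capped at 16.
 */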
#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);
void pmap_collect(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
static int
vm_pageout_clean(m, sync)
	vm_page_t m;
	int sync;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((sync != VM_PAGEOUT_FORCE) &&
	    (object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((!sync && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (!sync && object->backing_object) {
		vm_object_collapse(object);
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	for (i = page_base; i < (page_base + pageout_count); i++) {
		mc[i]->flags |= PG_BUSY;
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}
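/*
 * Layout of the clustering window built above (illustrative, assuming the
 * default vm_pageout_page_count of 16):
 *
 *	mc[]:	[0 .. 15]	  [16]	       [17 .. 31]
 *		backward pages	  target m     forward pages
 *
 * Backward hits fill mc[15], mc[14], ... (page_base tracks the lowest
 * occupied slot); forward hits fill mc[17], mc[18], ...  Because each
 * direction stops at its first non-clusterable page, the occupied slots
 * always form one pindex-contiguous run of at most 16 pages, handed to
 * vm_pageout_flush() as &mc[page_base].
 */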
int
vm_pageout_flush(mc, count, sync)
	vm_page_t *mc;
	int count;
	int sync;
{
	register vm_object_t object;
	int pageout_status[count];
	int anyok = 0;
	int i;

	object = mc[0]->object;
	object->paging_in_progress += count;

	vm_pager_put_pages(object, mc, count,
	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We will
			 * try paging it out again later.)
			 */
			if (mt->queue == PQ_INACTIVE)
				vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}


		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			PAGE_WAKEUP(mt);
		}
	}
	return anyok;
}
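/*
 * Note on the return protocol above: VM_PAGER_PEND means the write was
 * queued asynchronously and still counts as success (anyok), but the page
 * stays busy and paging_in_progress stays elevated; the pager's I/O
 * completion path, not this file, is then expected to drop both.  The
 * synchronous statuses are settled immediately in the loop above.
 */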
#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				p->flags |= PG_REFERENCED;
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				p->flags &= ~PG_REFERENCED;
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					p->flags &= ~PG_REFERENCED;
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	vm_map_reference(map);
	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		vm_map_deallocate(map);
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			break;
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	vm_map_deallocate(map);
	return;
}
#endif
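/*
 * Taken together, the two passes above implement a crude fairness policy:
 * shrink the single biggest object with at most one shadow first, then
 * sweep the remaining map entries only while the pmap is still over
 * "desired".  In the full swap-out case (desired == 0) every resident page
 * ends up deactivated, and the trailing pmap_remove() also releases the
 * process's page table pages.
 */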
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, addl_page_shortage, maxscan, maxlaunder, pcount;
	int pages_freed;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	/*
	 * Start scanning the inactive queue for pages we can free.  We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	pages_freed = 0;
	addl_page_shortage = 0;

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;
rescan0:
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queue_inactive);
	     (m != NULL) && (maxscan-- > 0) &&
	     ((cnt.v_cache_count + cnt.v_free_count) <
	      (cnt.v_cache_min + cnt.v_free_target));
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous references.
		 */
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the inactive
		 * queue, we bump the "activation count" upwards, making it less
		 * likely that the page will be added back to the inactive queue
		 * prematurely again.  Here we check the page tables (or emulated
		 * bits, if any), since the upper level VM system knows nothing
		 * about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page references,
		 * we activate the page.  We also set the "activation count"
		 * higher than normal so that we will be less likely to place
		 * the page back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			m->flags &= ~PG_REFERENCED;
#if 0
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
#else
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
#endif
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about the
		 * page being dirty, we have to check for it again.  As far as
		 * the VM code knows, any partially dirty pages are fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else if (m->dirty != 0) {
			m->dirty = VM_PAGE_BITS_ALL;
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			cnt.v_dfree++;
			++pages_freed;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			++pages_freed;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (object->flags & OBJ_DEAD) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);

			}
			if (!swap_pageouts_ok) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) ||
				    vget(vp, LK_EXCLUSIVE, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					continue;
				}

				/*
				 * The page might have been moved to another queue
				 * during potential blocking in vget() above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget(); we deliberately do not
				 * move it back onto the end of the queue,
				 * which keeps the statistics more correct.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */
	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}

	/*
	 * If the "inactive" loop finds that there is a shortage over and
	 * above the page statistics variables, then we need to accommodate
	 * that.  This avoids potential deadlocks due to pages being
	 * temporarily busy for I/O or other types of temporary wiring.
	 */
	if (addl_page_shortage) {
		if (page_shortage < 0)
			page_shortage = 0;
		page_shortage += addl_page_shortage;
	}

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		m->flags &= ~PG_REFERENCED;

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				--page_shortage;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();
	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover);
		if (!m)
			break;
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swapping_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout = 1;
		}
#endif
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}
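/*
 * A worked instance of the shortage arithmetic in vm_pageout_scan() (the
 * figures are illustrative only): with v_inactive_target = 300,
 * v_cache_min = 100, v_free_count = 50, v_inactive_count = 200 and
 * v_cache_count = 100,
 *
 *	page_shortage = (300 + 100) - (50 + 200 + 100) = 50,
 *
 * so the active-queue loop tries to deactivate 50 pages.  Pages skipped
 * as busy or held during the inactive scan feed addl_page_shortage, which
 * inflates that target so that transient wiring cannot starve the scan.
 */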
/*
 * This routine tries to maintain the pseudo LRU active queue, so that
 * during long periods of time when there is no paging, some statistics
 * accumulation still occurs.  This code helps the situation where paging
 * just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			m->flags &= ~PG_REFERENCED;
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM-wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}


static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}
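/*
 * Worked example for vm_pageout_free_page_calc() (figures illustrative,
 * assuming 4K pages, a 64K MAXBSIZE, and the v_interrupt_free_min of 2 set
 * up in vm_pageout() below): for count = v_page_count = 8192 pages (32MB),
 *
 *	v_free_min         = 4 + (8192 - 1024) / 200           = 39
 *	v_pageout_free_min = (2 * 65536) / 4096 + 2            = 34
 *	v_free_reserved    = 16 + 34 + 8192/768 + PQ_L2_SIZE   = 60 + PQ_L2_SIZE
 *	v_free_min (final) = 39 + v_free_reserved
 *
 * so the reserve scales roughly linearly with physical memory.
 */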
/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 4;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 25;

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int inactive_target;
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
		}
		inactive_target =
		    (cnt.v_page_count - cnt.v_wire_count) / 4;
		if (inactive_target < 2*cnt.v_free_min)
			inactive_target = 2*cnt.v_free_min;
		cnt.v_inactive_target = inactive_target;
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}
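/*
 * The sleep/wakeup handshake above, in brief: a thread that needs memory
 * calls pagedaemon_wakeup(), which sets vm_pages_needed and wakes the
 * daemon sleeping on &vm_pages_needed.  When a scan pass completes, the
 * daemon in turn wakes everything sleeping on &cnt.v_free_count, which is
 * where memory-starved threads wait for free pages.  If the tsleep() above
 * instead times out with no demand pending, the pass is downgraded to the
 * cheap vm_pageout_page_stats() bookkeeping scan.
 */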
#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs();
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits, or if a
		 * process is swapped out -- deactivate pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which
			 * will force a swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_pindex_t)(limit >> PAGE_SHIFT));
			}
		}

		/*
		 * we remove cached objects that have no RSS...
		 */
restart:
		object = TAILQ_FIRST(&vm_object_cached_list);
		while (object) {
			/*
			 * if there are no resident pages -- get rid of the object
			 */
			if (object->resident_page_count == 0) {
				vm_object_reference(object);
				pager_cache(object, FALSE);
				goto restart;
			}
			object = TAILQ_NEXT(object, cached_list);
		}
	}
}
#endif
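/*
 * Worked example of the RSS clamp in vm_daemon() (figures illustrative,
 * assuming 4K pages): a process with rlim_cur = 8MB and a resident set of
 * 2560 pages has size = 2560 * 4096 = 10MB >= limit, so its map is handed
 * to vm_pageout_map_deactivate_pages() with desired = 8MB >> PAGE_SHIFT =
 * 2048 pages, and pages are deactivated until the pmap's resident count
 * drops back under the limit.
 */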