vm_pageout.c revision 17003
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.80 1996/06/29 09:15:39 davidg Exp $
 */

/*
 * The proverbial page-out daemon.
 */
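/*
 * Overview: this file implements two kernel threads.  The "pagedaemon"
 * (vm_pageout / vm_pageout_scan) tries to keep the free and cache page
 * counts above their targets by freeing, caching, or laundering inactive
 * pages and by deactivating idle active pages.  When swapping is
 * configured in, the "vmdaemon" (vm_daemon) additionally enforces RSS
 * limits, swaps out whole processes, and discards cached objects that
 * have no resident pages.
 */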
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
int vm_pageout_algorithm_lru = 0;
#if defined(NO_SWAPPING)
int vm_swapping_enabled = 0;
#else
int vm_swapping_enabled = 1;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
	CTLFLAG_RD, &vm_swapping_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
	CTLFLAG_RW, &vm_swapping_enabled, 0, "");
#endif

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
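/*
 * A note on the clustering below: mc[] holds up to
 * 2 * vm_pageout_page_count candidates, with the target page anchored at
 * the middle slot (mc[vm_pageout_page_count]).  The forward scan fills
 * slots above the middle (pindex + i), the backward scan fills slots
 * below it (pindex - i), and page_base tracks the lowest occupied slot,
 * so the final cluster is the contiguous run
 * mc[page_base .. page_base + pageout_count - 1] handed to
 * vm_pageout_flush().
 */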
static int
vm_pageout_clean(m, sync)
	vm_page_t m;
	int sync;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((sync != VM_PAGEOUT_FORCE) &&
	    (object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((!sync && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (!sync && object->backing_object) {
		vm_object_collapse(object);
	}
	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if ((p->queue == PQ_CACHE) || (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if ((p->queue == PQ_CACHE) || (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}
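	/*
	 * Each candidate is marked PG_BUSY, so a fault on it blocks until
	 * the pageout completes, and is write-protected down to
	 * VM_PROT_READ, so that (presumably) any write attempted while the
	 * pageout is in flight takes a fault and the modification is not
	 * silently lost.
	 */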
	/*
	 * we allow reads during pageouts...
	 */
	for (i = page_base; i < (page_base + pageout_count); i++) {
		mc[i]->flags |= PG_BUSY;
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}

int
vm_pageout_flush(mc, count, sync)
	vm_page_t *mc;
	int count;
	int sync;
{
	register vm_object_t object;
	int pageout_status[count];
	int anyok = 0;
	int i;

	object = mc[0]->object;
	object->paging_in_progress += count;

	vm_pager_put_pages(object, mc, count,
	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			if (mt->queue == PQ_INACTIVE)
				vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}


		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			PAGE_WAKEUP(mt);
		}
	}
	return anyok;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
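/*
 * Note: when remove_mode is set (either passed in as map_remove_only or
 * forced because the object has more than one shadow and is therefore
 * shared), active pages are never deactivated here -- they are only
 * rotated to the tail of the active queue -- and inactive pages merely
 * have their mappings removed.
 */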
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int refcount;
			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			refcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (refcount) {
				p->flags |= PG_REFERENCED;
			} else if (p->flags & PG_REFERENCED) {
				refcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += refcount;
				p->flags &= ~PG_REFERENCED;
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					p->flags &= ~PG_REFERENCED;
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	vm_map_reference(map);
	if (!lock_try_write(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			break;
		if ((tmpe->is_sub_map == 0) && (tmpe->is_a_map == 0)) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	vm_map_deallocate(map);
	return;
}
#endif

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next, nextnext;
	int page_shortage, addl_page_shortage, maxscan, maxlaunder, pcount;
	int pages_freed;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Start scanning the inactive queue for pages we can free.  We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	pages_freed = 0;
	addl_page_shortage = 0;

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;
rescan0:
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queue_inactive);
	     (m != NULL) && (maxscan-- > 0) &&
	     ((cnt.v_cache_count + cnt.v_free_count) <
	      (cnt.v_cache_min + cnt.v_free_target));
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
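		/*
		 * Held and busy pages skipped in this scan are tallied in
		 * addl_page_shortage, which is later folded into
		 * page_shortage so the active-queue scan deactivates
		 * correspondingly more pages.
		 */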
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
			vm_page_activate(m);
			continue;
		}

		if ((m->flags & PG_REFERENCED) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			continue;
		}

		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else if (m->dirty != 0) {
			m->dirty = VM_PAGE_BITS_ALL;
		}

		if (m->valid == 0) {
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			cnt.v_dfree++;
			++pages_freed;
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			++pages_freed;
		} else if (maxlaunder > 0) {
			int written;
			struct vnode *vp = NULL;

			object = m->object;
			if (object->flags & OBJ_DEAD) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) || vget(vp, 1)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied while we
				 * blocked in vget() above.  We don't move the
				 * page back onto the end of the queue;
				 * statistics are more accurate that way.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);

			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory, be
	 * sure that we will move a minimal number of pages from active to
	 * inactive.
	 */
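	/*
	 * For example (hypothetical numbers): with v_inactive_target == 300,
	 * v_cache_min == 100, v_free_count == 50, v_inactive_count == 200,
	 * and v_cache_count == 100, the shortage is
	 * (300 + 100) - (50 + 200 + 100) = 50 pages to move.
	 */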
	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}
	if (addl_page_shortage) {
		if (page_shortage < 0)
			page_shortage = 0;
		page_shortage += addl_page_shortage;
	}

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);
	nextnext = NULL;
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
		int refcount;

		if (m->queue != PQ_ACTIVE) {
			m = nextnext;
			if ((m == NULL) || (m->queue != PQ_ACTIVE))
				break;
		}

		next = TAILQ_NEXT(m, pageq);
		if (next)
			nextnext = TAILQ_NEXT(next, pageq);
		else
			nextnext = NULL;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		refcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				refcount += 1;
			}
			refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (refcount) {
				m->act_count += ACT_ADVANCE + refcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		m->flags &= ~PG_REFERENCED;

		if (refcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				--page_shortage;
				vm_page_protect(m, VM_PROT_NONE);
				if ((m->dirty == 0) &&
				    (m->object->ref_count == 0)) {
					vm_page_cache(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();
	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		m = TAILQ_FIRST(&vm_page_queue_cache);
		if (!m)
			break;
		vm_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swapping_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout = 1;
		}
#endif
	}


	/*
	 * Make sure that we have swap space -- if we are low on memory and
	 * swap, then kill the biggest process.
	 */
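	/*
	 * Victim selection: system processes, init (pid 1), and -- while any
	 * swap space remains -- low-numbered startup processes (pid < 48)
	 * are exempt; among runnable or sleeping processes, the one with the
	 * largest resident page count is killed.
	 */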
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768);
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}


#ifdef unused
int
vm_pageout_free_pages(object, add)
	vm_object_t object;
	int add;
{
	return vm_pageout_free_page_calc(object->size);
}
#endif

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;


	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
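	/*
	 * Each iteration sleeps on vm_pages_needed until woken (from
	 * elsewhere in the VM system) because free pages have run short,
	 * recomputes the inactive target as a quarter of the unwired pages
	 * (but at least 2 * v_free_min), and then runs vm_pageout_scan()
	 * bracketed by vm_pager_sync() calls.
	 */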
	while (TRUE) {
		int inactive_target;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", 0);
		} else if (!vm_pages_needed) {
			tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
		}
		inactive_target =
		    (cnt.v_page_count - cnt.v_wire_count) / 4;
		if (inactive_target < 2*cnt.v_free_min)
			inactive_target = 2*cnt.v_free_min;
		cnt.v_inactive_target = inactive_target;
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	(void) spl0();

	while (TRUE) {
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs();
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits, or if the
		 * process is swapped out -- deactivate pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_pindex_t)(limit >> PAGE_SHIFT));
			}
		}

		/*
		 * we remove cached objects that have no RSS...
		 */
restart:
		object = TAILQ_FIRST(&vm_object_cached_list);
		while (object) {
			/*
			 * if there are no resident pages -- get rid of the
			 * object
			 */
			if (object->resident_page_count == 0) {
				vm_object_reference(object);
				pager_cache(object, FALSE);
				goto restart;
			}
			object = TAILQ_NEXT(object, cached_list);
		}
	}
}
#endif