vm_pageout.c revision 71572
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 71572 2001-01-24 11:28:36Z jhb $
 */

/*
 * The proverbial page-out daemon.
 */
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static void vm_pageout_scan __P((int pass));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
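
/*
 * The knobs above all live under the "vm" sysctl tree, so the CTLFLAG_RW
 * ones can be tuned from userland without a rebuild; for example (the
 * values shown are illustrative only -- good settings depend on the
 * workload and on how much RAM and swap is configured):
 *
 *	sysctl -w vm.max_launder=128		# allow more dirty flushes per scan
 *	sysctl -w vm.defer_swapspace_pageouts=1	# prefer keeping dirty anon pages in RAM
 */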

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 * 
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page until then.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */

more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}
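	/*
	 * A worked example of the cluster array layout (illustrative; the
	 * numbers assume the default vm_pageout_page_count of 16): mc[] has
	 * 32 slots and the target page m starts in the middle, at mc[16].
	 * The reverse scan above fills mc[15], mc[14], ... moving page_base
	 * down, and stops early at an alignment boundary so that clusters
	 * tend to start on a vm_pageout_page_count-aligned pindex.  The
	 * forward scan below then fills mc[17], mc[18], ... until the
	 * cluster is full.  The flush at the end is handed the contiguous
	 * slice mc[page_base .. page_base + pageout_count - 1].
	 */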
	while (pageout_count < vm_pageout_page_count && 
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */

	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * the page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}
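
/*
 * A note on the accounting above (inferred from the code, not part of the
 * original commentary): VM_PAGER_PEND counts toward numpagedout even though
 * the write has not completed, because the pager has taken ownership of the
 * pages; the pager's async completion path is then expected to perform the
 * vm_object_pip_wakeup() and vm_page_io_finish() itself.  Only the
 * synchronously-completed cases are unwound here.
 */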

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}
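
/*
 * The act_count arithmetic above implements a simple page-aging scheme (a
 * summary inferred from the code; the ACT_* constants live in vm_page.h):
 * each unreferenced visit subtracts ACT_DECLINE from act_count and each
 * referenced visit adds ACT_ADVANCE, saturating at ACT_MAX.  A page is
 * only deactivated once its count decays to zero (or unconditionally when
 * vm_pageout_algorithm selects pure LRU and remove_mode is clear), so a
 * page must sit unreferenced through several scans before it is pushed
 * toward the inactive queue.
 */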

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}
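
/*
 * A note on the reference/deallocate bracket above (rationale inferred
 * from the code rather than stated in it): the transient reference keeps
 * the OBJT_SWAP/OBJT_DEFAULT object from being torn down while the page is
 * being freed, and the matching vm_object_deallocate() then performs any
 * final cleanup of the object once the page is gone.
 */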

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */

	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, as they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			s = splvm();
			vm_page_flag_set(m, PG_WINATCFLS);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * XXX we need to be able to apply a timeout to the
			 * vget() lock attempt.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied while we
				 * were blocked in vget() above.  We don't
				 * move the page back onto the end of the
				 * queue; the statistics are more correct
				 * that way.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}
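
	/*
	 * To recap the loop above (a summary inferred from its cases, not
	 * original commentary): invalid pages are freed outright, clean
	 * pages are moved to the cache queue, and on pass 0 a dirty page is
	 * first requeued with PG_WINATCFLS set, so it survives at least two
	 * trips through the queue before we pay for a disk write on its
	 * behalf.
	 */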

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
		bigproc = NULL;
		bigsize = 0;
		ALLPROC_LOCK(AP_SHARED);
		LIST_FOREACH(p, &allproc, p_list) {
			/*
			 * if this is a system process, skip it
			 */
			PROC_LOCK(p);
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_enter(&sched_lock, MTX_SPIN);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_exit(&sched_lock, MTX_SPIN);
				continue;
			}
			mtx_exit(&sched_lock, MTX_SPIN);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		ALLPROC_LOCK(AP_RELEASE);
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			mtx_enter(&sched_lock, MTX_SPIN);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			mtx_exit(&sched_lock, MTX_SPIN);
			wakeup(&cnt.v_free_count);
		}
	}
}
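
/*
 * A note on the victim selection above (a summary of the checks, not
 * original commentary): a kill is only considered when swap is essentially
 * exhausted AND free memory is critically low.  The scan skips kernel
 * processes (P_SYSTEM), processes already being held for swapping
 * (p_lock), init (pid 1), and low-numbered pids while any swap remains,
 * then kills the process with the largest resident set.  The priority
 * reset after killproc() helps the victim get scheduled quickly so it can
 * actually exit and release its pages.
 */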

/*
 * This routine tries to maintain the pseudo LRU active queue, so that some
 * statistics still accumulate during long periods in which no paging
 * occurs.  This code helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;	/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage = 
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		 return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}
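
/*
 * A worked example of the calculation above (illustrative only; assumes 4K
 * pages, the stock MAXBSIZE of 64K, vm_pageout_page_count of 16,
 * v_interrupt_free_min of 2, and a PQ_L2_SIZE of 256 -- the last value
 * depends on the PQ_*CACHE kernel options).  For a machine with 32768
 * pages (128MB):
 *
 *	v_free_min         = 4 + (32768 - 1024) / 200     = 162
 *	v_pageout_free_min = (2 * 65536) / 4096 + 2       = 34
 *	v_free_reserved    = 16 + 34 + 32768 / 768 + 256  = 348
 *	v_free_severe      = 162 / 2 + 348                = 429
 *	v_free_min (final) = 162 + 348                    = 510  (~2MB)
 */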

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int pass;

	mtx_enter(&Giant, MTX_DEF);

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	curproc->p_flag |= P_BUFEXHAUST;
	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan(pass);
		vm_pageout_deficit = 0;
	}
}
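
/*
 * The main loop above implements a two-level response to memory pressure
 * (a summary of the logic, not original commentary): pass 0 is the normal
 * case, where laundering is capped by vm.max_launder; if a scan ends with
 * vm_pages_needed still set, the pass counter climbs and subsequent scans
 * run with an effectively unlimited launder budget, with a half-second
 * tsleep between attempts to give I/O a chance to complete.  When the
 * daemon is idle it instead wakes every vm_pageout_stats_interval seconds
 * just to run vm_pageout_page_stats().
 */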

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	mtx_enter(&Giant, MTX_DEF);

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for those exceeding their rlimits or
		 * that are swapped out -- deactivate their pages
		 */

		ALLPROC_LOCK(AP_SHARED);
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_enter(&sched_lock, MTX_SPIN);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_exit(&sched_lock, MTX_SPIN);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			        p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which will
			 * force a swap-out.
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			mtx_exit(&sched_lock, MTX_SPIN);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		ALLPROC_LOCK(AP_RELEASE);
	}
}
#endif