vm_pageout.c revision 88318
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 88318 2001-12-20 22:42:27Z dillon $
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static void vm_pageout_scan __P((int pass));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif
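
/*
 * All of the CTLFLAG_RW knobs above and below are tunable at runtime
 * with sysctl(8), e.g. "sysctl vm.max_launder=64" to allow more dirty
 * pages to be laundered per pageout pass (the value here is chosen
 * purely for illustration).
 */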

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however, the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	GIANT_REQUIRED;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
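
	/*
	 * An example of the layout above: mc[] has 2*vm_pageout_page_count
	 * slots and the target page sits in the middle, so the reverse
	 * scan below can collect up to vm_pageout_page_count-1
	 * predecessors while the forward scan fills whatever room
	 * remains.  With the default count of 16 and pindex 37, the
	 * reverse scan collects pages 36 down to 32 and then stops at
	 * the 16-page alignment boundary, so the flush starts on an
	 * aligned cluster.
	 */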

more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}
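
/*
 * Note that the value returned by vm_pageout_flush() (and hence by
 * vm_pageout_clean()) counts both synchronously completed (VM_PAGER_OK)
 * and still-pending (VM_PAGER_PEND) pageouts; the caller credits its
 * page shortage for the pending I/O on the assumption that it will
 * complete shortly.
 */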

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	GIANT_REQUIRED;
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */

	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	vm_page_t p, next;
	int rcount;
	int remove_mode;

	GIANT_REQUIRED;
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}
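
/*
 * The scan above also walks the object's backing_object chain, so pages
 * of a shadow's backing object are considered as well.  When
 * shadow_count > 1 the pages may be visible through other shadows, so
 * remove_mode is forced and active pages are merely aged and requeued
 * rather than deactivated out from under the other mappings.
 */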

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	GIANT_REQUIRED;
	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curthread)) {
		return;
	}

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			    (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */
void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	GIANT_REQUIRED;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}
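
/*
 * The temporary reference on OBJT_SWAP/OBJT_DEFAULT objects is taken,
 * presumably, so the object cannot be torn down while we free what may
 * be its last resident page; for other object types (e.g. vnode-backed)
 * the reference is skipped, consistent with the deadlock note above.
 */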

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;

	GIANT_REQUIRED;
	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
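
	/*
	 * To make the shortage concrete: vm_paging_target() (defined in
	 * vm_pageout.h in this era) works out to roughly
	 *
	 *	(v_free_target + v_cache_min) - (v_free_count + v_cache_count)
	 *
	 * so a positive value means the free+cache pools are below their
	 * combined target, and any deficit recorded by allocators since
	 * the last scan (vm_pageout_deficit) is added on top.
	 */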

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}
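
			/*
			 * In other words: pageouts to a vnode always
			 * proceed; pageouts to swap proceed unless one of
			 * the vm.defer_swapspace_pageouts or
			 * vm.disable_swapspace_pageouts knobs is set, and a
			 * deferred (but not disabled) swap pageout is still
			 * forced through once free memory falls to the
			 * critical threshold tested by vm_page_count_min().
			 */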

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ|LK_TIMELOCK, curthread)) {
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue;
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
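
	/*
	 * The act_count arithmetic above gives a rough exponential decay:
	 * a referenced page gains ACT_ADVANCE plus one per reference bit
	 * found (capped at ACT_MAX), while an unreferenced page loses
	 * ACT_DECLINE per scan and becomes a deactivation candidate only
	 * once the count reaches zero.  With the historical constants
	 * (ACT_DECLINE 1, ACT_ADVANCE 3, ACT_MAX 64), a page that was hot
	 * and then went idle survives many scans before being deactivated.
	 */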

	s = splvm();

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for that
	 * child's lock as we walk this list.  To avoid this, we don't block
	 * on the process lock but just skip a process if it is already
	 * locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace) +
			    vmspace_swap_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				kg->kg_estcpu = 0;
				kg->kg_nice = PRIO_MIN;	/* XXXKSE ??? */
				resetpriority(kg);
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}
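
/*
 * Victim selection above is deliberately crude: the "largest" process is
 * simply the one with the most resident plus swapped pages, and init
 * (pid 1), system processes, and (while swap remains) low-pid daemons
 * are exempt.  The victim's priority is reset so that it runs soon and
 * can exit quickly, releasing its memory.
 */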

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}
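
/*
 * A rough worked example of the above on an i386 with 128MB of RAM
 * (v_page_count ~= 32768 4K pages, MAXBSIZE 64K, and, say, PQ_L2_SIZE
 * 64): v_free_min starts at 4 + 31744/200 = 162, v_pageout_free_min is
 * 2*64K/4K + 2 = 34, and v_free_reserved is 16 + 34 + 32768/768 + 64 =
 * 156, giving a final v_free_min of 318 and v_free_severe of 237 pages.
 * The exact figures depend on the page-coloring configuration.
 */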

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int pass;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;
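
	/*
	 * Continuing the 128MB example above: with v_free_min 318 and
	 * v_free_reserved 156, v_free_target becomes 4*318 + 156 = 1428
	 * pages (~5.6MB), v_cache_min 1428, v_cache_max 2856, and
	 * v_inactive_target 2142, comfortably under the v_free_count/3
	 * cap.
	 */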

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	PROC_LOCK(curthread->td_proc);
	curthread->td_proc->p_flag |= P_BUFEXHAUST;
	PROC_UNLOCK(curthread->td_proc);
	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				tsleep(&vm_pages_needed, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = tsleep(&vm_pages_needed, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan(pass);
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_unlock_spin(&sched_lock);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			        p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			mtx_unlock_spin(&sched_lock);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif