vm_pageout.c revision 117001
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 41 * 42 * 43 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 44 * All rights reserved. 45 * 46 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 47 * 48 * Permission to use, copy, modify and distribute this software and 49 * its documentation is hereby granted, provided that both the copyright 50 * notice and this permission notice appear in all copies of the 51 * software, derivative works or modified versions, and any portions 52 * thereof, and that both notices appear in supporting documentation. 53 * 54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 57 * 58 * Carnegie Mellon requests users of this software to return to 59 * 60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 61 * School of Computer Science 62 * Carnegie Mellon University 63 * Pittsburgh PA 15213-3890 64 * 65 * any improvements or extensions that they make and grant Carnegie the 66 * rights to redistribute these changes. 67 */ 68 69/* 70 * The proverbial page-out daemon. 
71 */ 72 73#include <sys/cdefs.h> 74__FBSDID("$FreeBSD: head/sys/vm/vm_pageout.c 117001 2003-06-28 20:07:54Z alc $"); 75 76#include "opt_vm.h" 77#include <sys/param.h> 78#include <sys/systm.h> 79#include <sys/kernel.h> 80#include <sys/eventhandler.h> 81#include <sys/lock.h> 82#include <sys/mutex.h> 83#include <sys/proc.h> 84#include <sys/kthread.h> 85#include <sys/ktr.h> 86#include <sys/resourcevar.h> 87#include <sys/sched.h> 88#include <sys/signalvar.h> 89#include <sys/vnode.h> 90#include <sys/vmmeter.h> 91#include <sys/sx.h> 92#include <sys/sysctl.h> 93 94#include <vm/vm.h> 95#include <vm/vm_param.h> 96#include <vm/vm_object.h> 97#include <vm/vm_page.h> 98#include <vm/vm_map.h> 99#include <vm/vm_pageout.h> 100#include <vm/vm_pager.h> 101#include <vm/swap_pager.h> 102#include <vm/vm_extern.h> 103#include <vm/uma.h> 104 105#include <machine/mutex.h> 106 107/* 108 * System initialization 109 */ 110 111/* the kernel process "vm_pageout"*/ 112static void vm_pageout(void); 113static int vm_pageout_clean(vm_page_t); 114static void vm_pageout_page_free(vm_page_t); 115static void vm_pageout_pmap_collect(void); 116static void vm_pageout_scan(int pass); 117static int vm_pageout_free_page_calc(vm_size_t count); 118struct proc *pageproc; 119 120static struct kproc_desc page_kp = { 121 "pagedaemon", 122 vm_pageout, 123 &pageproc 124}; 125SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp) 126 127#if !defined(NO_SWAPPING) 128/* the kernel process "vm_daemon"*/ 129static void vm_daemon(void); 130static struct proc *vmproc; 131 132static struct kproc_desc vm_kp = { 133 "vmdaemon", 134 vm_daemon, 135 &vmproc 136}; 137SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp) 138#endif 139 140 141int vm_pages_needed; /* Event on which pageout daemon sleeps */ 142int vm_pageout_deficit; /* Estimated number of pages deficit */ 143int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */ 144 145#if !defined(NO_SWAPPING) 146static int vm_pageout_req_swapout; /* XXX */ 147static int vm_daemon_needed; 148#endif 149static int vm_max_launder = 32; 150static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0; 151static int vm_pageout_full_stats_interval = 0; 152static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0; 153static int defer_swap_pageouts=0; 154static int disable_swap_pageouts=0; 155 156#if defined(NO_SWAPPING) 157static int vm_swap_enabled=0; 158static int vm_swap_idle_enabled=0; 159#else 160static int vm_swap_enabled=1; 161static int vm_swap_idle_enabled=0; 162#endif 163 164SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm, 165 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt"); 166 167SYSCTL_INT(_vm, OID_AUTO, max_launder, 168 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout"); 169 170SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max, 171 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length"); 172 173SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval, 174 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan"); 175 176SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval, 177 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan"); 178 179SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max, 180 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented"); 181 182#if defined(NO_SWAPPING) 183SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, 184 CTLFLAG_RD, &vm_swap_enabled, 0, ""); 185SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, 186 CTLFLAG_RD, 
    &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
    CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
    CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
    CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
    CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
    CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
    vm_page_t m;
{
    vm_object_t object;
    vm_page_t mc[2*vm_pageout_page_count];
    int numpagedout, pageout_count;
    int ib, is, page_base;
    vm_pindex_t pindex = m->pindex;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);

    /*
     * It doesn't cost us anything to page out OBJT_DEFAULT or OBJT_SWAP
     * with the new swapper, but we could have serious problems paging
     * out other object types if there is insufficient memory.
     *
     * Unfortunately, checking free memory here is far too late, so the
     * check has been moved up a procedural level.
     */

    /*
     * Don't mess with the page if it's busy, held, or special.
     */
    if ((m->hold_count != 0) ||
        ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED))) ||
        !VM_OBJECT_TRYLOCK(m->object)) {
        return 0;
    }

    mc[vm_pageout_page_count] = m;
    pageout_count = 1;
    page_base = vm_pageout_page_count;
    ib = 1;
    is = 1;

    /*
     * Scan object for clusterable pages.
     *
     * We can cluster ONLY if: ->> the page is NOT
     * clean, wired, busy, held, or mapped into a
     * buffer, and one of the following:
     * 1) The page is inactive, or a seldom used
     *    active page.
     * -or-
     * 2) we force the issue.
     *
     * During heavy mmap/modification loads the pageout
     * daemon can really fragment the underlying file
     * due to flushing pages out of order and not trying
     * to align the clusters (which leaves sporadic out-of-order
     * holes).  To solve this problem we do the reverse scan
     * first and attempt to align our cluster, then do a
     * forward scan if room remains.
280 */ 281 object = m->object; 282more: 283 while (ib && pageout_count < vm_pageout_page_count) { 284 vm_page_t p; 285 286 if (ib > pindex) { 287 ib = 0; 288 break; 289 } 290 291 if ((p = vm_page_lookup(object, pindex - ib)) == NULL) { 292 ib = 0; 293 break; 294 } 295 if (((p->queue - p->pc) == PQ_CACHE) || 296 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { 297 ib = 0; 298 break; 299 } 300 vm_page_test_dirty(p); 301 if ((p->dirty & p->valid) == 0 || 302 p->queue != PQ_INACTIVE || 303 p->wire_count != 0 || /* may be held by buf cache */ 304 p->hold_count != 0) { /* may be undergoing I/O */ 305 ib = 0; 306 break; 307 } 308 mc[--page_base] = p; 309 ++pageout_count; 310 ++ib; 311 /* 312 * alignment boundry, stop here and switch directions. Do 313 * not clear ib. 314 */ 315 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0) 316 break; 317 } 318 319 while (pageout_count < vm_pageout_page_count && 320 pindex + is < object->size) { 321 vm_page_t p; 322 323 if ((p = vm_page_lookup(object, pindex + is)) == NULL) 324 break; 325 if (((p->queue - p->pc) == PQ_CACHE) || 326 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { 327 break; 328 } 329 vm_page_test_dirty(p); 330 if ((p->dirty & p->valid) == 0 || 331 p->queue != PQ_INACTIVE || 332 p->wire_count != 0 || /* may be held by buf cache */ 333 p->hold_count != 0) { /* may be undergoing I/O */ 334 break; 335 } 336 mc[page_base + pageout_count] = p; 337 ++pageout_count; 338 ++is; 339 } 340 341 /* 342 * If we exhausted our forward scan, continue with the reverse scan 343 * when possible, even past a page boundry. This catches boundry 344 * conditions. 345 */ 346 if (ib && pageout_count < vm_pageout_page_count) 347 goto more; 348 349 /* 350 * we allow reads during pageouts... 351 */ 352 numpagedout = vm_pageout_flush(&mc[page_base], pageout_count, 0, TRUE); 353 VM_OBJECT_UNLOCK(object); 354 return (numpagedout); 355} 356 357/* 358 * vm_pageout_flush() - launder the given pages 359 * 360 * The given pages are laundered. Note that we setup for the start of 361 * I/O ( i.e. busy the page ), mark it read-only, and bump the object 362 * reference count all in here rather then in the parent. If we want 363 * the parent to do more sophisticated things we may have to change 364 * the ordering. 365 */ 366int 367vm_pageout_flush(mc, count, flags, is_object_locked) 368 vm_page_t *mc; 369 int count; 370 int flags; 371 int is_object_locked; 372{ 373 vm_object_t object; 374 int pageout_status[count]; 375 int numpagedout = 0; 376 int i; 377 378 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 379 /* 380 * Initiate I/O. Bump the vm_page_t->busy counter and 381 * mark the pages read-only. 382 * 383 * We do not have to fixup the clean/dirty bits here... we can 384 * allow the pager to do it after the I/O completes. 385 * 386 * NOTE! mc[i]->dirty may be partial or fragmented due to an 387 * edge case with file fragments. 388 */ 389 for (i = 0; i < count; i++) { 390 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count)); 391 vm_page_io_start(mc[i]); 392 pmap_page_protect(mc[i], VM_PROT_READ); 393 } 394 object = mc[0]->object; 395 vm_page_unlock_queues(); 396 if (!is_object_locked) 397 VM_OBJECT_LOCK(object); 398 vm_object_pip_add(object, count); 399 VM_OBJECT_UNLOCK(object); 400 401 vm_pager_put_pages(object, mc, count, 402 (flags | ((object == kernel_object) ? 
VM_PAGER_PUT_SYNC : 0)), 403 pageout_status); 404 405 VM_OBJECT_LOCK(object); 406 vm_page_lock_queues(); 407 for (i = 0; i < count; i++) { 408 vm_page_t mt = mc[i]; 409 410 switch (pageout_status[i]) { 411 case VM_PAGER_OK: 412 case VM_PAGER_PEND: 413 numpagedout++; 414 break; 415 case VM_PAGER_BAD: 416 /* 417 * Page outside of range of object. Right now we 418 * essentially lose the changes by pretending it 419 * worked. 420 */ 421 pmap_clear_modify(mt); 422 vm_page_undirty(mt); 423 break; 424 case VM_PAGER_ERROR: 425 case VM_PAGER_FAIL: 426 /* 427 * If page couldn't be paged out, then reactivate the 428 * page so it doesn't clog the inactive list. (We 429 * will try paging out it again later). 430 */ 431 vm_page_activate(mt); 432 break; 433 case VM_PAGER_AGAIN: 434 break; 435 } 436 437 /* 438 * If the operation is still going, leave the page busy to 439 * block all other accesses. Also, leave the paging in 440 * progress indicator set so that we don't attempt an object 441 * collapse. 442 */ 443 if (pageout_status[i] != VM_PAGER_PEND) { 444 vm_object_pip_wakeup(object); 445 vm_page_io_finish(mt); 446 if (!vm_page_count_severe() || !vm_page_try_to_cache(mt)) 447 pmap_page_protect(mt, VM_PROT_READ); 448 } 449 } 450 if (!is_object_locked) 451 VM_OBJECT_UNLOCK(object); 452 return numpagedout; 453} 454 455#if !defined(NO_SWAPPING) 456/* 457 * vm_pageout_object_deactivate_pages 458 * 459 * deactivate enough pages to satisfy the inactive target 460 * requirements or if vm_page_proc_limit is set, then 461 * deactivate all of the pages in the object and its 462 * backing_objects. 463 * 464 * The object and map must be locked. 465 */ 466static void 467vm_pageout_object_deactivate_pages(map, object, desired) 468 vm_map_t map; 469 vm_object_t object; 470 vm_pindex_t desired; 471{ 472 vm_page_t p, next; 473 int actcount, rcount, remove_mode; 474 475 GIANT_REQUIRED; 476 if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS) 477 return; 478 479 while (object) { 480 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 481 return; 482 if (object->paging_in_progress) 483 return; 484 485 remove_mode = 0; 486 if (object->shadow_count > 1) 487 remove_mode = 1; 488 /* 489 * scan the objects entire memory queue 490 */ 491 rcount = object->resident_page_count; 492 p = TAILQ_FIRST(&object->memq); 493 vm_page_lock_queues(); 494 while (p && (rcount-- > 0)) { 495 if (pmap_resident_count(map->pmap) <= desired) { 496 vm_page_unlock_queues(); 497 return; 498 } 499 next = TAILQ_NEXT(p, listq); 500 cnt.v_pdpages++; 501 if (p->wire_count != 0 || 502 p->hold_count != 0 || 503 p->busy != 0 || 504 (p->flags & (PG_BUSY|PG_UNMANAGED)) || 505 !pmap_page_exists_quick(vm_map_pmap(map), p)) { 506 p = next; 507 continue; 508 } 509 actcount = pmap_ts_referenced(p); 510 if (actcount) { 511 vm_page_flag_set(p, PG_REFERENCED); 512 } else if (p->flags & PG_REFERENCED) { 513 actcount = 1; 514 } 515 if ((p->queue != PQ_ACTIVE) && 516 (p->flags & PG_REFERENCED)) { 517 vm_page_activate(p); 518 p->act_count += actcount; 519 vm_page_flag_clear(p, PG_REFERENCED); 520 } else if (p->queue == PQ_ACTIVE) { 521 if ((p->flags & PG_REFERENCED) == 0) { 522 p->act_count -= min(p->act_count, ACT_DECLINE); 523 if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) { 524 pmap_remove_all(p); 525 vm_page_deactivate(p); 526 } else { 527 vm_pageq_requeue(p); 528 } 529 } else { 530 vm_page_activate(p); 531 vm_page_flag_clear(p, PG_REFERENCED); 532 if (p->act_count < (ACT_MAX - ACT_ADVANCE)) 533 p->act_count += ACT_ADVANCE; 534 
vm_pageq_requeue(p); 535 } 536 } else if (p->queue == PQ_INACTIVE) { 537 pmap_remove_all(p); 538 } 539 p = next; 540 } 541 vm_page_unlock_queues(); 542 object = object->backing_object; 543 } 544} 545 546/* 547 * deactivate some number of pages in a map, try to do it fairly, but 548 * that is really hard to do. 549 */ 550static void 551vm_pageout_map_deactivate_pages(map, desired) 552 vm_map_t map; 553 vm_pindex_t desired; 554{ 555 vm_map_entry_t tmpe; 556 vm_object_t obj, bigobj; 557 int nothingwired; 558 559 GIANT_REQUIRED; 560 if (!vm_map_trylock(map)) 561 return; 562 563 bigobj = NULL; 564 nothingwired = TRUE; 565 566 /* 567 * first, search out the biggest object, and try to free pages from 568 * that. 569 */ 570 tmpe = map->header.next; 571 while (tmpe != &map->header) { 572 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 573 obj = tmpe->object.vm_object; 574 if ((obj != NULL) && (obj->shadow_count <= 1) && 575 ((bigobj == NULL) || 576 (bigobj->resident_page_count < obj->resident_page_count))) { 577 bigobj = obj; 578 } 579 } 580 if (tmpe->wired_count > 0) 581 nothingwired = FALSE; 582 tmpe = tmpe->next; 583 } 584 585 if (bigobj) 586 vm_pageout_object_deactivate_pages(map, bigobj, desired); 587 588 /* 589 * Next, hunt around for other pages to deactivate. We actually 590 * do this search sort of wrong -- .text first is not the best idea. 591 */ 592 tmpe = map->header.next; 593 while (tmpe != &map->header) { 594 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 595 break; 596 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 597 obj = tmpe->object.vm_object; 598 if (obj) 599 vm_pageout_object_deactivate_pages(map, obj, desired); 600 } 601 tmpe = tmpe->next; 602 } 603 604 /* 605 * Remove all mappings if a process is swapped out, this will free page 606 * table pages. 607 */ 608 if (desired == 0 && nothingwired) { 609 vm_page_lock_queues(); 610 pmap_remove(vm_map_pmap(map), vm_map_min(map), 611 vm_map_max(map)); 612 vm_page_unlock_queues(); 613 } 614 vm_map_unlock(map); 615} 616#endif /* !defined(NO_SWAPPING) */ 617 618/* 619 * Warning! The page queue lock is released and reacquired. 620 */ 621static void 622vm_pageout_page_free(vm_page_t m) 623{ 624 vm_object_t object = m->object; 625 626 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 627 vm_page_busy(m); 628 vm_page_unlock_queues(); 629 /* 630 * Avoid a lock order reversal. The page must be busy. 631 */ 632 VM_OBJECT_LOCK(object); 633 vm_page_lock_queues(); 634 pmap_remove_all(m); 635 vm_page_free(m); 636 VM_OBJECT_UNLOCK(object); 637 cnt.v_dfree++; 638} 639 640/* 641 * This routine is very drastic, but can save the system 642 * in a pinch. 643 */ 644static void 645vm_pageout_pmap_collect(void) 646{ 647 int i; 648 vm_page_t m; 649 static int warningdone; 650 651 if (pmap_pagedaemon_waken == 0) 652 return; 653 if (warningdone < 5) { 654 printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n"); 655 warningdone++; 656 } 657 vm_page_lock_queues(); 658 for (i = 0; i < vm_page_array_size; i++) { 659 m = &vm_page_array[i]; 660 if (m->wire_count || m->hold_count || m->busy || 661 (m->flags & (PG_BUSY | PG_UNMANAGED))) 662 continue; 663 pmap_remove_all(m); 664 } 665 vm_page_unlock_queues(); 666 pmap_pagedaemon_waken = 0; 667} 668 669/* 670 * vm_pageout_scan does the dirty work for the pageout daemon. 
671 */ 672static void 673vm_pageout_scan(int pass) 674{ 675 vm_page_t m, next; 676 struct vm_page marker; 677 int save_page_shortage; 678 int save_inactive_count; 679 int page_shortage, maxscan, pcount; 680 int addl_page_shortage, addl_page_shortage_init; 681 struct proc *p, *bigproc; 682 vm_offset_t size, bigsize; 683 vm_object_t object; 684 int actcount; 685 int vnodes_skipped = 0; 686 int maxlaunder; 687 int s; 688 struct thread *td; 689 690 GIANT_REQUIRED; 691 /* 692 * Decrease registered cache sizes. 693 */ 694 EVENTHANDLER_INVOKE(vm_lowmem, 0); 695 /* 696 * We do this explicitly after the caches have been drained above. 697 */ 698 uma_reclaim(); 699 /* 700 * Do whatever cleanup that the pmap code can. 701 */ 702 vm_pageout_pmap_collect(); 703 704 addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit); 705 706 /* 707 * Calculate the number of pages we want to either free or move 708 * to the cache. 709 */ 710 page_shortage = vm_paging_target() + addl_page_shortage_init; 711 save_page_shortage = page_shortage; 712 save_inactive_count = cnt.v_inactive_count; 713 714 /* 715 * Initialize our marker 716 */ 717 bzero(&marker, sizeof(marker)); 718 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER; 719 marker.queue = PQ_INACTIVE; 720 marker.wire_count = 1; 721 722 /* 723 * Start scanning the inactive queue for pages we can move to the 724 * cache or free. The scan will stop when the target is reached or 725 * we have scanned the entire inactive queue. Note that m->act_count 726 * is not used to form decisions for the inactive queue, only for the 727 * active queue. 728 * 729 * maxlaunder limits the number of dirty pages we flush per scan. 730 * For most systems a smaller value (16 or 32) is more robust under 731 * extreme memory and disk pressure because any unnecessary writes 732 * to disk can result in extreme performance degredation. However, 733 * systems with excessive dirty pages (especially when MAP_NOSYNC is 734 * used) will die horribly with limited laundering. If the pageout 735 * daemon cannot clean enough pages in the first pass, we let it go 736 * all out in succeeding passes. 737 */ 738 if ((maxlaunder = vm_max_launder) <= 1) 739 maxlaunder = 1; 740 if (pass) 741 maxlaunder = 10000; 742rescan0: 743 addl_page_shortage = addl_page_shortage_init; 744 maxscan = cnt.v_inactive_count; 745 746 for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl); 747 m != NULL && maxscan-- > 0 && page_shortage > 0; 748 m = next) { 749 750 cnt.v_pdpages++; 751 752 if (m->queue != PQ_INACTIVE) { 753 goto rescan0; 754 } 755 756 next = TAILQ_NEXT(m, pageq); 757 758 /* 759 * skip marker pages 760 */ 761 if (m->flags & PG_MARKER) 762 continue; 763 764 /* 765 * A held page may be undergoing I/O, so skip it. 766 */ 767 if (m->hold_count) { 768 vm_pageq_requeue(m); 769 addl_page_shortage++; 770 continue; 771 } 772 /* 773 * Don't mess with busy pages, keep in the front of the 774 * queue, most likely are being paged out. 775 */ 776 if (m->busy || (m->flags & PG_BUSY)) { 777 addl_page_shortage++; 778 continue; 779 } 780 781 vm_page_lock_queues(); 782 /* 783 * If the object is not being used, we ignore previous 784 * references. 785 */ 786 if (m->object->ref_count == 0) { 787 vm_page_flag_clear(m, PG_REFERENCED); 788 pmap_clear_reference(m); 789 790 /* 791 * Otherwise, if the page has been referenced while in the 792 * inactive queue, we bump the "activation count" upwards, 793 * making it less likely that the page will be added back to 794 * the inactive queue prematurely again. 
Here we check the
            * page tables (or emulated bits, if any), given the upper
            * level VM system not knowing anything about existing
            * references.
            */
        } else if (((m->flags & PG_REFERENCED) == 0) &&
            (actcount = pmap_ts_referenced(m))) {
            vm_page_activate(m);
            vm_page_unlock_queues();
            m->act_count += (actcount + ACT_ADVANCE);
            continue;
        }

        /*
         * If the upper level VM system knows about any page
         * references, we activate the page.  We also set the
         * "activation count" higher than normal so that we will be less
         * likely to place pages back onto the inactive queue again.
         */
        if ((m->flags & PG_REFERENCED) != 0) {
            vm_page_flag_clear(m, PG_REFERENCED);
            actcount = pmap_ts_referenced(m);
            vm_page_activate(m);
            vm_page_unlock_queues();
            m->act_count += (actcount + ACT_ADVANCE + 1);
            continue;
        }

        /*
         * If the upper level VM system doesn't know anything about
         * the page being dirty, we have to check for it again.  As
         * far as the VM code knows, any partially dirty pages are
         * fully dirty.
         */
        if (m->dirty == 0) {
            vm_page_test_dirty(m);
        } else {
            vm_page_dirty(m);
        }
        vm_page_unlock_queues();

        /*
         * Invalid pages can be easily freed.
         */
        if (m->valid == 0) {
            vm_page_lock_queues();
            vm_pageout_page_free(m);
            vm_page_unlock_queues();
            --page_shortage;

        /*
         * Clean pages can be placed onto the cache queue.  This
         * effectively frees them.
         */
        } else if (m->dirty == 0) {
            vm_page_lock_queues();
            vm_page_cache(m);
            vm_page_unlock_queues();
            --page_shortage;
        } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
            /*
             * Dirty pages need to be paged out, but flushing
             * a page is extremely expensive versus freeing
             * a clean page.  Rather than artificially limiting
             * the number of pages we can flush, we instead give
             * dirty pages extra priority on the inactive queue
             * by forcing them to be cycled through the queue
             * twice before being flushed, after which the
             * (now clean) page will cycle through once more
             * before being freed.  This significantly extends
             * the thrash point for a heavily loaded machine.
             */
            vm_page_lock_queues();
            vm_page_flag_set(m, PG_WINATCFLS);
            vm_pageq_requeue(m);
            vm_page_unlock_queues();
        } else if (maxlaunder > 0) {
            /*
             * We always want to try to flush some dirty pages if
             * we encounter them, to keep the system stable.
             * Normally this number is small, but under extreme
             * pressure where there are insufficient clean pages
             * on the inactive queue, we may have to go all out.
             */
            int swap_pageouts_ok;
            struct vnode *vp = NULL;
            struct mount *mp;

            object = m->object;

            if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
                swap_pageouts_ok = 1;
            } else {
                swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
                swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
                    vm_page_count_min());
            }

            /*
             * We don't bother paging objects that are "dead".
             * Those objects are in a "rundown" state.
             */
            if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
                vm_pageq_requeue(m);
                continue;
            }

            /*
             * The object is already known NOT to be dead.  It
             * is possible for the vget() to block the whole
             * pageout daemon, but the new low-memory handling
             * code should prevent it.
907 * 908 * The previous code skipped locked vnodes and, worse, 909 * reordered pages in the queue. This results in 910 * completely non-deterministic operation and, on a 911 * busy system, can lead to extremely non-optimal 912 * pageouts. For example, it can cause clean pages 913 * to be freed and dirty pages to be moved to the end 914 * of the queue. Since dirty pages are also moved to 915 * the end of the queue once-cleaned, this gives 916 * way too large a weighting to defering the freeing 917 * of dirty pages. 918 * 919 * We can't wait forever for the vnode lock, we might 920 * deadlock due to a vn_read() getting stuck in 921 * vm_wait while holding this vnode. We skip the 922 * vnode if we can't get it in a reasonable amount 923 * of time. 924 */ 925 if (object->type == OBJT_VNODE) { 926 vp = object->handle; 927 928 mp = NULL; 929 if (vp->v_type == VREG) 930 vn_start_write(vp, &mp, V_NOWAIT); 931 if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) { 932 ++pageout_lock_miss; 933 vn_finished_write(mp); 934 if (object->flags & OBJ_MIGHTBEDIRTY) 935 vnodes_skipped++; 936 continue; 937 } 938 939 /* 940 * The page might have been moved to another 941 * queue during potential blocking in vget() 942 * above. The page might have been freed and 943 * reused for another vnode. The object might 944 * have been reused for another vnode. 945 */ 946 if (m->queue != PQ_INACTIVE || 947 m->object != object || 948 object->handle != vp) { 949 if (object->flags & OBJ_MIGHTBEDIRTY) 950 vnodes_skipped++; 951 vput(vp); 952 vn_finished_write(mp); 953 continue; 954 } 955 956 /* 957 * The page may have been busied during the 958 * blocking in vput(); We don't move the 959 * page back onto the end of the queue so that 960 * statistics are more correct if we don't. 961 */ 962 if (m->busy || (m->flags & PG_BUSY)) { 963 vput(vp); 964 vn_finished_write(mp); 965 continue; 966 } 967 968 /* 969 * If the page has become held it might 970 * be undergoing I/O, so skip it 971 */ 972 if (m->hold_count) { 973 vm_pageq_requeue(m); 974 if (object->flags & OBJ_MIGHTBEDIRTY) 975 vnodes_skipped++; 976 vput(vp); 977 vn_finished_write(mp); 978 continue; 979 } 980 } 981 982 /* 983 * If a page is dirty, then it is either being washed 984 * (but not yet cleaned) or it is still in the 985 * laundry. If it is still in the laundry, then we 986 * start the cleaning operation. 987 * 988 * This operation may cluster, invalidating the 'next' 989 * pointer. To prevent an inordinate number of 990 * restarts we use our marker to remember our place. 991 * 992 * decrement page_shortage on success to account for 993 * the (future) cleaned page. Otherwise we could wind 994 * up laundering or cleaning too many pages. 995 */ 996 vm_page_lock_queues(); 997 s = splvm(); 998 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq); 999 splx(s); 1000 if (vm_pageout_clean(m) != 0) { 1001 --page_shortage; 1002 --maxlaunder; 1003 } 1004 s = splvm(); 1005 next = TAILQ_NEXT(&marker, pageq); 1006 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq); 1007 splx(s); 1008 vm_page_unlock_queues(); 1009 if (vp) { 1010 vput(vp); 1011 vn_finished_write(mp); 1012 } 1013 } 1014 } 1015 1016 /* 1017 * Compute the number of pages we want to try to move from the 1018 * active queue to the inactive queue. 1019 */ 1020 page_shortage = vm_paging_target() + 1021 cnt.v_inactive_target - cnt.v_inactive_count; 1022 page_shortage += addl_page_shortage; 1023 1024 vm_page_lock_queues(); 1025 /* 1026 * Scan the active queue for things we can deactivate. 
We nominally 1027 * track the per-page activity counter and use it to locate 1028 * deactivation candidates. 1029 */ 1030 pcount = cnt.v_active_count; 1031 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); 1032 1033 while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) { 1034 1035 /* 1036 * This is a consistency check, and should likely be a panic 1037 * or warning. 1038 */ 1039 if (m->queue != PQ_ACTIVE) { 1040 break; 1041 } 1042 1043 next = TAILQ_NEXT(m, pageq); 1044 /* 1045 * Don't deactivate pages that are busy. 1046 */ 1047 if ((m->busy != 0) || 1048 (m->flags & PG_BUSY) || 1049 (m->hold_count != 0)) { 1050 vm_pageq_requeue(m); 1051 m = next; 1052 continue; 1053 } 1054 1055 /* 1056 * The count for pagedaemon pages is done after checking the 1057 * page for eligibility... 1058 */ 1059 cnt.v_pdpages++; 1060 1061 /* 1062 * Check to see "how much" the page has been used. 1063 */ 1064 actcount = 0; 1065 if (m->object->ref_count != 0) { 1066 if (m->flags & PG_REFERENCED) { 1067 actcount += 1; 1068 } 1069 actcount += pmap_ts_referenced(m); 1070 if (actcount) { 1071 m->act_count += ACT_ADVANCE + actcount; 1072 if (m->act_count > ACT_MAX) 1073 m->act_count = ACT_MAX; 1074 } 1075 } 1076 1077 /* 1078 * Since we have "tested" this bit, we need to clear it now. 1079 */ 1080 vm_page_flag_clear(m, PG_REFERENCED); 1081 1082 /* 1083 * Only if an object is currently being used, do we use the 1084 * page activation count stats. 1085 */ 1086 if (actcount && (m->object->ref_count != 0)) { 1087 vm_pageq_requeue(m); 1088 } else { 1089 m->act_count -= min(m->act_count, ACT_DECLINE); 1090 if (vm_pageout_algorithm || 1091 m->object->ref_count == 0 || 1092 m->act_count == 0) { 1093 page_shortage--; 1094 if (m->object->ref_count == 0) { 1095 pmap_remove_all(m); 1096 if (m->dirty == 0) 1097 vm_page_cache(m); 1098 else 1099 vm_page_deactivate(m); 1100 } else { 1101 vm_page_deactivate(m); 1102 } 1103 } else { 1104 vm_pageq_requeue(m); 1105 } 1106 } 1107 m = next; 1108 } 1109 s = splvm(); 1110 1111 /* 1112 * We try to maintain some *really* free pages, this allows interrupt 1113 * code to be guaranteed space. Since both cache and free queues 1114 * are considered basically 'free', moving pages from cache to free 1115 * does not effect other calculations. 1116 */ 1117 while (cnt.v_free_count < cnt.v_free_reserved) { 1118 static int cache_rover = 0; 1119 m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE); 1120 if (!m) 1121 break; 1122 if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || 1123 m->busy || 1124 m->hold_count || 1125 m->wire_count) { 1126#ifdef INVARIANTS 1127 printf("Warning: busy page %p found in cache\n", m); 1128#endif 1129 vm_page_deactivate(m); 1130 continue; 1131 } 1132 cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK; 1133 vm_pageout_page_free(m); 1134 } 1135 splx(s); 1136 vm_page_unlock_queues(); 1137#if !defined(NO_SWAPPING) 1138 /* 1139 * Idle process swapout -- run once per second. 1140 */ 1141 if (vm_swap_idle_enabled) { 1142 static long lsec; 1143 if (time_second != lsec) { 1144 vm_pageout_req_swapout |= VM_SWAP_IDLE; 1145 vm_req_vmdaemon(); 1146 lsec = time_second; 1147 } 1148 } 1149#endif 1150 1151 /* 1152 * If we didn't get enough free pages, and we have skipped a vnode 1153 * in a writeable object, wakeup the sync daemon. And kick swapout 1154 * if we did not get enough free pages. 
     */
    if (vm_paging_target() > 0) {
        if (vnodes_skipped && vm_page_count_min())
            (void) speedup_syncer();
#if !defined(NO_SWAPPING)
        if (vm_swap_enabled && vm_page_count_target()) {
            vm_req_vmdaemon();
            vm_pageout_req_swapout |= VM_SWAP_NORMAL;
        }
#endif
    }

    /*
     * If we are critically low on one of RAM or swap and low on
     * the other, kill the largest process.  However, we avoid
     * doing this on the first pass in order to give ourselves a
     * chance to flush out dirty vnode-backed pages and to allow
     * active pages to be moved to the inactive queue and reclaimed.
     *
     * We keep the process bigproc locked once we find it to keep anyone
     * from messing with it; however, there is a possibility of
     * deadlock if process B is bigproc and one of its child processes
     * attempts to propagate a signal to B while we are waiting for A's
     * lock while walking this list.  To avoid this, we don't block on
     * the process lock but just skip a process if it is already locked.
     */
    if (pass != 0 &&
        ((vm_swap_size < 64 && vm_page_count_min()) ||
         (swap_pager_full && vm_paging_target() > 0))) {
        bigproc = NULL;
        bigsize = 0;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
            int breakout;
            /*
             * If this process is already locked, skip it.
             */
            if (PROC_TRYLOCK(p) == 0)
                continue;
            /*
             * If this is a system or protected process, skip it.
             */
            if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
                (p->p_flag & P_PROTECTED) ||
                ((p->p_pid < 48) && (vm_swap_size != 0))) {
                PROC_UNLOCK(p);
                continue;
            }
            /*
             * If the process is in a non-running type state,
             * don't touch it.  Check all the threads individually.
             */
            mtx_lock_spin(&sched_lock);
            breakout = 0;
            FOREACH_THREAD_IN_PROC(p, td) {
                if (!TD_ON_RUNQ(td) &&
                    !TD_IS_RUNNING(td) &&
                    !TD_IS_SLEEPING(td)) {
                    breakout = 1;
                    break;
                }
            }
            if (breakout) {
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                continue;
            }
            mtx_unlock_spin(&sched_lock);
            /*
             * Get the process size.
             */
            if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
                PROC_UNLOCK(p);
                continue;
            }
            size = vmspace_swap_count(p->p_vmspace);
            vm_map_unlock_read(&p->p_vmspace->vm_map);
            size += vmspace_resident_count(p->p_vmspace);
            /*
             * If this process is bigger than the biggest one,
             * remember it.
             */
            if (size > bigsize) {
                if (bigproc != NULL)
                    PROC_UNLOCK(bigproc);
                bigproc = p;
                bigsize = size;
            } else
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);
        if (bigproc != NULL) {
            struct ksegrp *kg;
            killproc(bigproc, "out of swap space");
            mtx_lock_spin(&sched_lock);
            FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
                sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
            }
            mtx_unlock_spin(&sched_lock);
            PROC_UNLOCK(bigproc);
            wakeup(&cnt.v_free_count);
        }
    }
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
1265 */ 1266static void 1267vm_pageout_page_stats() 1268{ 1269 vm_page_t m,next; 1270 int pcount,tpcount; /* Number of pages to check */ 1271 static int fullintervalcount = 0; 1272 int page_shortage; 1273 int s0; 1274 1275 page_shortage = 1276 (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) - 1277 (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count); 1278 1279 if (page_shortage <= 0) 1280 return; 1281 1282 s0 = splvm(); 1283 vm_page_lock_queues(); 1284 pcount = cnt.v_active_count; 1285 fullintervalcount += vm_pageout_stats_interval; 1286 if (fullintervalcount < vm_pageout_full_stats_interval) { 1287 tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count; 1288 if (pcount > tpcount) 1289 pcount = tpcount; 1290 } else { 1291 fullintervalcount = 0; 1292 } 1293 1294 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); 1295 while ((m != NULL) && (pcount-- > 0)) { 1296 int actcount; 1297 1298 if (m->queue != PQ_ACTIVE) { 1299 break; 1300 } 1301 1302 next = TAILQ_NEXT(m, pageq); 1303 /* 1304 * Don't deactivate pages that are busy. 1305 */ 1306 if ((m->busy != 0) || 1307 (m->flags & PG_BUSY) || 1308 (m->hold_count != 0)) { 1309 vm_pageq_requeue(m); 1310 m = next; 1311 continue; 1312 } 1313 1314 actcount = 0; 1315 if (m->flags & PG_REFERENCED) { 1316 vm_page_flag_clear(m, PG_REFERENCED); 1317 actcount += 1; 1318 } 1319 1320 actcount += pmap_ts_referenced(m); 1321 if (actcount) { 1322 m->act_count += ACT_ADVANCE + actcount; 1323 if (m->act_count > ACT_MAX) 1324 m->act_count = ACT_MAX; 1325 vm_pageq_requeue(m); 1326 } else { 1327 if (m->act_count == 0) { 1328 /* 1329 * We turn off page access, so that we have 1330 * more accurate RSS stats. We don't do this 1331 * in the normal page deactivation when the 1332 * system is loaded VM wise, because the 1333 * cost of the large number of page protect 1334 * operations would be higher than the value 1335 * of doing the operation. 1336 */ 1337 pmap_remove_all(m); 1338 vm_page_deactivate(m); 1339 } else { 1340 m->act_count -= min(m->act_count, ACT_DECLINE); 1341 vm_pageq_requeue(m); 1342 } 1343 } 1344 1345 m = next; 1346 } 1347 vm_page_unlock_queues(); 1348 splx(s0); 1349} 1350 1351static int 1352vm_pageout_free_page_calc(count) 1353vm_size_t count; 1354{ 1355 if (count < cnt.v_page_count) 1356 return 0; 1357 /* 1358 * free_reserved needs to include enough for the largest swap pager 1359 * structures plus enough for any pv_entry structs when paging. 1360 */ 1361 if (cnt.v_page_count > 1024) 1362 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200; 1363 else 1364 cnt.v_free_min = 4; 1365 cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 1366 cnt.v_interrupt_free_min; 1367 cnt.v_free_reserved = vm_pageout_page_count + 1368 cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE; 1369 cnt.v_free_severe = cnt.v_free_min / 2; 1370 cnt.v_free_min += cnt.v_free_reserved; 1371 cnt.v_free_severe += cnt.v_free_reserved; 1372 return 1; 1373} 1374 1375/* 1376 * vm_pageout is the high level pageout daemon. 1377 */ 1378static void 1379vm_pageout() 1380{ 1381 int error, pass, s; 1382 1383 mtx_lock(&Giant); 1384 1385 /* 1386 * Initialize some paging parameters. 1387 */ 1388 cnt.v_interrupt_free_min = 2; 1389 if (cnt.v_page_count < 2000) 1390 vm_pageout_page_count = 8; 1391 1392 vm_pageout_free_page_calc(cnt.v_page_count); 1393 /* 1394 * v_free_target and v_cache_min control pageout hysteresis. Note 1395 * that these are more a measure of the VM cache queue hysteresis 1396 * then the VM free queue. 
Specifically, v_free_target is the 1397 * high water mark (free+cache pages). 1398 * 1399 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the 1400 * low water mark, while v_free_min is the stop. v_cache_min must 1401 * be big enough to handle memory needs while the pageout daemon 1402 * is signalled and run to free more pages. 1403 */ 1404 if (cnt.v_free_count > 6144) 1405 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved; 1406 else 1407 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved; 1408 1409 if (cnt.v_free_count > 2048) { 1410 cnt.v_cache_min = cnt.v_free_target; 1411 cnt.v_cache_max = 2 * cnt.v_cache_min; 1412 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2; 1413 } else { 1414 cnt.v_cache_min = 0; 1415 cnt.v_cache_max = 0; 1416 cnt.v_inactive_target = cnt.v_free_count / 4; 1417 } 1418 if (cnt.v_inactive_target > cnt.v_free_count / 3) 1419 cnt.v_inactive_target = cnt.v_free_count / 3; 1420 1421 /* XXX does not really belong here */ 1422 if (vm_page_max_wired == 0) 1423 vm_page_max_wired = cnt.v_free_count / 3; 1424 1425 if (vm_pageout_stats_max == 0) 1426 vm_pageout_stats_max = cnt.v_free_target; 1427 1428 /* 1429 * Set interval in seconds for stats scan. 1430 */ 1431 if (vm_pageout_stats_interval == 0) 1432 vm_pageout_stats_interval = 5; 1433 if (vm_pageout_full_stats_interval == 0) 1434 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4; 1435 1436 /* 1437 * Set maximum free per pass 1438 */ 1439 if (vm_pageout_stats_free_max == 0) 1440 vm_pageout_stats_free_max = 5; 1441 1442 swap_pager_swap_init(); 1443 pass = 0; 1444 /* 1445 * The pageout daemon is never done, so loop forever. 1446 */ 1447 while (TRUE) { 1448 s = splvm(); 1449 vm_page_lock_queues(); 1450 /* 1451 * If we have enough free memory, wakeup waiters. Do 1452 * not clear vm_pages_needed until we reach our target, 1453 * otherwise we may be woken up over and over again and 1454 * waste a lot of cpu. 1455 */ 1456 if (vm_pages_needed && !vm_page_count_min()) { 1457 if (!vm_paging_needed()) 1458 vm_pages_needed = 0; 1459 wakeup(&cnt.v_free_count); 1460 } 1461 if (vm_pages_needed) { 1462 /* 1463 * Still not done, take a second pass without waiting 1464 * (unlimited dirty cleaning), otherwise sleep a bit 1465 * and try again. 1466 */ 1467 ++pass; 1468 if (pass > 1) 1469 msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM, 1470 "psleep", hz/2); 1471 } else { 1472 /* 1473 * Good enough, sleep & handle stats. Prime the pass 1474 * for the next run. 1475 */ 1476 if (pass > 1) 1477 pass = 1; 1478 else 1479 pass = 0; 1480 error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM, 1481 "psleep", vm_pageout_stats_interval * hz); 1482 if (error && !vm_pages_needed) { 1483 vm_page_unlock_queues(); 1484 splx(s); 1485 pass = 0; 1486 vm_pageout_page_stats(); 1487 continue; 1488 } 1489 } 1490 if (vm_pages_needed) 1491 cnt.v_pdwakeups++; 1492 vm_page_unlock_queues(); 1493 splx(s); 1494 vm_pageout_scan(pass); 1495 } 1496} 1497 1498/* 1499 * Unless the page queue lock is held by the caller, this function 1500 * should be regarded as advisory. Specifically, the caller should 1501 * not msleep() on &cnt.v_free_count following this function unless 1502 * the page queue lock is held until the msleep() is performed. 
1503 */ 1504void 1505pagedaemon_wakeup() 1506{ 1507 1508 if (!vm_pages_needed && curthread->td_proc != pageproc) { 1509 vm_pages_needed = 1; 1510 wakeup(&vm_pages_needed); 1511 } 1512} 1513 1514#if !defined(NO_SWAPPING) 1515static void 1516vm_req_vmdaemon() 1517{ 1518 static int lastrun = 0; 1519 1520 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) { 1521 wakeup(&vm_daemon_needed); 1522 lastrun = ticks; 1523 } 1524} 1525 1526static void 1527vm_daemon() 1528{ 1529 struct proc *p; 1530 int breakout; 1531 struct thread *td; 1532 1533 mtx_lock(&Giant); 1534 while (TRUE) { 1535 tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0); 1536 if (vm_pageout_req_swapout) { 1537 swapout_procs(vm_pageout_req_swapout); 1538 vm_pageout_req_swapout = 0; 1539 } 1540 /* 1541 * scan the processes for exceeding their rlimits or if 1542 * process is swapped out -- deactivate pages 1543 */ 1544 sx_slock(&allproc_lock); 1545 LIST_FOREACH(p, &allproc, p_list) { 1546 vm_pindex_t limit, size; 1547 1548 /* 1549 * if this is a system process or if we have already 1550 * looked at this process, skip it. 1551 */ 1552 PROC_LOCK(p); 1553 if (p->p_flag & (P_SYSTEM | P_WEXIT)) { 1554 PROC_UNLOCK(p); 1555 continue; 1556 } 1557 /* 1558 * if the process is in a non-running type state, 1559 * don't touch it. 1560 */ 1561 mtx_lock_spin(&sched_lock); 1562 breakout = 0; 1563 FOREACH_THREAD_IN_PROC(p, td) { 1564 if (!TD_ON_RUNQ(td) && 1565 !TD_IS_RUNNING(td) && 1566 !TD_IS_SLEEPING(td)) { 1567 breakout = 1; 1568 break; 1569 } 1570 } 1571 mtx_unlock_spin(&sched_lock); 1572 if (breakout) { 1573 PROC_UNLOCK(p); 1574 continue; 1575 } 1576 /* 1577 * get a limit 1578 */ 1579 limit = OFF_TO_IDX( 1580 qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 1581 p->p_rlimit[RLIMIT_RSS].rlim_max)); 1582 1583 /* 1584 * let processes that are swapped out really be 1585 * swapped out set the limit to nothing (will force a 1586 * swap-out.) 1587 */ 1588 if ((p->p_sflag & PS_INMEM) == 0) 1589 limit = 0; /* XXX */ 1590 PROC_UNLOCK(p); 1591 1592 size = vmspace_resident_count(p->p_vmspace); 1593 if (limit >= 0 && size >= limit) { 1594 vm_pageout_map_deactivate_pages( 1595 &p->p_vmspace->vm_map, limit); 1596 } 1597 } 1598 sx_sunlock(&allproc_lock); 1599 } 1600} 1601#endif /* !defined(NO_SWAPPING) */ 1602
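
As a rough illustration of how the paging thresholds above behave, the following is a minimal stand-alone user-space sketch that replays the arithmetic of vm_pageout_free_page_calc() and the v_free_target setup in vm_pageout(). The machine size (1 GB) is hypothetical, and MAXBSIZE = 65536, PAGE_SIZE = 4096 and PQ_L2_SIZE = 256 are assumed, configuration-dependent values; only the formulas themselves are taken from the code in this file.

#include <stdio.h>

int
main(void)
{
    long page_count = 262144;       /* hypothetical: 1 GB of 4 KB pages */
    long pageout_page_count = 16;   /* VM_PAGEOUT_PAGE_COUNT */
    long interrupt_free_min = 2;    /* set in vm_pageout() */
    long maxbsize = 65536, page_size = 4096, pq_l2_size = 256; /* assumed */
    long free_min, pageout_free_min, free_reserved, free_severe, free_target;

    /* vm_pageout_free_page_calc() */
    free_min = (page_count > 1024) ? 4 + (page_count - 1024) / 200 : 4;
    pageout_free_min = (2 * maxbsize) / page_size + interrupt_free_min;
    free_reserved = pageout_page_count + pageout_free_min +
        page_count / 768 + pq_l2_size;
    free_severe = free_min / 2 + free_reserved;
    free_min += free_reserved;

    /* vm_pageout(): v_free_target, assuming v_free_count > 6144 at boot */
    free_target = 4 * free_min + free_reserved;

    printf("v_free_min=%ld v_free_reserved=%ld v_free_severe=%ld "
        "v_free_target=%ld\n", free_min, free_reserved, free_severe,
        free_target);
    return (0);
}

For the hypothetical 262144-page machine this prints v_free_min=1956, v_free_reserved=647, v_free_severe=1301 and v_free_target=8471, showing how the reserve terms dominate the low-water marks while v_free_target (the free+cache high-water mark) scales to roughly four times v_free_min.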