vm_pageout.c revision 113869
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 113869 2003-04-22 20:03:08Z jhb $
 */

/*
 * The proverbial page-out daemon.
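 *
 * The daemon wakes when free memory runs low, moves pages among the
 * active, inactive, cache and free queues, launders dirty pages via
 * their pagers, and as a last resort kills the largest eligible
 * process when swap space is exhausted.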
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_page_free(vm_page_t);
static void vm_pageout_pmap_collect(void);
static void vm_pageout_scan(int pass);
static int vm_pageout_free_page_calc(vm_size_t count);
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
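
/*
 * The CTLFLAG_RW knobs above are runtime-tunable under the "vm" sysctl
 * tree; for example, "sysctl vm.max_launder=64" raises the per-scan
 * dirty page flush limit on a running system.
 */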

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however, the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do
	 * the reverse scan first and attempt to align our
	 * cluster, then do a forward scan if room remains.
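	 *
	 * For example, with vm_pageout_page_count = 16 and a dirty page at
	 * pindex 21, the reverse scan collects pindexes 20 down to 16 and
	 * stops at the 16-page alignment boundary, so the forward scan can
	 * fill the remainder and the flush covers an aligned cluster
	 * whenever the neighboring pages qualify.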
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * The alignment boundary: stop here and switch directions.
		 * Do not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O (i.e. busy the page), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */
int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush page %p index %d/%d: partially invalid page",
		    mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_page_protect(mc[i], VM_PROT_READ);
	}
	object = mc[0]->object;
	vm_page_unlock_queues();
	VM_OBJECT_LOCK(object);
	vm_object_pip_add(object, count);
	VM_OBJECT_UNLOCK(object);
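
	/*
	 * Hand the cluster to the pager.  Writes against the kernel object
	 * are forced synchronous here; everything else may complete
	 * asynchronously, in which case the pages remain busy until the
	 * pager's completion handler finishes the I/O.
	 */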
	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
	    pageout_status);

	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				pmap_page_protect(mt, VM_PROT_READ);
		}
	}
	VM_OBJECT_UNLOCK(object);
	return numpagedout;
}
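
/*
 * Note that vm_pageout_flush()'s return value counts both completed
 * (VM_PAGER_OK) and still-pending (VM_PAGER_PEND) pageouts; the caller
 * treats a pending write as a future clean page.
 */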

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	GIANT_REQUIRED;
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(map->pmap) <= desired) {
				vm_page_unlock_queues();
				return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			p = next;
		}
		vm_page_unlock_queues();
		object = object->backing_object;
	}
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	GIANT_REQUIRED;
	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			    (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		vm_page_lock_queues();
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	pmap_remove_all(m);
	vm_page_free(m);
	cnt.v_dfree++;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
static void
vm_pageout_pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone;

	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	vm_page_lock_queues();
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	vm_page_unlock_queues();
	pmap_pagedaemon_waken = 0;
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
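 *
 * Each invocation: (1) scans the inactive queue, freeing clean pages
 * and laundering dirty ones until the paging target is met, (2) scans
 * the active queue to deactivate lightly used pages, (3) tops up the
 * free queue from the cache queue, and (4) if swap is exhausted or
 * nearly so and the target still is not met, kills the largest
 * eligible process.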
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();
	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	vm_pageout_pmap_collect();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		vm_page_lock_queues();
		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.
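		 * (The bump below is actcount + ACT_ADVANCE, where actcount
		 * is the number of mappings in which pmap_ts_referenced()
		 * found the page referenced.)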
		 * Here we check the page tables (or emulated bits, if any),
		 * given that the upper level VM system does not know
		 * anything about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}
		vm_page_unlock_queues();

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_page_lock_queues();
			vm_pageout_page_free(m);
			vm_page_unlock_queues();
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_lock_queues();
			vm_page_cache(m);
			vm_page_unlock_queues();
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_lock_queues();
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
			vm_page_unlock_queues();
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
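			 * (vget() below is called with LK_TIMELOCK, so the
			 * wait is bounded; on failure we just bump
			 * pageout_lock_miss and skip the page.)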
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) {
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue, since
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_lock_queues();
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			vm_page_unlock_queues();
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;
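
	/*
	 * That is: the remaining free/cache shortfall plus however far the
	 * inactive queue itself is below its target, so deactivation keeps
	 * the inactive queue stocked for the next inactive scan.
	 */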

	vm_page_lock_queues();
	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					pmap_remove_all(m);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
	}
	splx(s);
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
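	 * (speedup_syncer() makes the syncer flush dirty vnodes sooner, so
	 * the pages we skipped above become clean and freeable.)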
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * If this is a system or protected process, skip it.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    (p->p_flag & P_PROTECTED) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.  Check all the threads individually.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
				PROC_UNLOCK(p);
				continue;
			}
			size = vmspace_swap_count(p->p_vmspace);
			vm_map_unlock_read(&p->p_vmspace->vm_map);
			size += vmspace_resident_count(p->p_vmspace);
			/*
			 * If this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				sched_nice(kg, PRIO_MIN);	/* XXXKSE ??? */
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods with no paging some statistic accumulation still
 * occurs.  This helps in the situation where paging just starts to occur.
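 *
 * It runs from the daemon's main loop when the msleep() on
 * vm_pages_needed times out without the daemon being needed, i.e. only
 * while the system is otherwise idle with respect to paging.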
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();
	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
	vm_page_unlock_queues();
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}
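
/*
 * For example, on a machine with 16384 managed pages (64MB with 4KB
 * pages), the computation above yields v_free_min = 4 + (16384 - 1024)
 * / 200 = 80 pages, before v_free_reserved is added in.
 */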

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int error, pass, s;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than of the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		s = splvm();
		vm_page_lock_queues();
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				vm_page_unlock_queues();
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_page_unlock_queues();
		splx(s);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the page queue lock is held until the msleep() is performed.
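 * Otherwise there is a window in which the daemon can observe enough
 * free pages and issue its wakeup between this call and the caller's
 * msleep(), leaving the caller asleep with the wakeup lost.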
 */
void
pagedaemon_wakeup()
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;
	int breakout;
	struct thread *td;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes for those exceeding their rlimits or
		 * that are swapped out -- deactivate pages.
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			mtx_unlock_spin(&sched_lock);
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			PROC_UNLOCK(p);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif		/* !defined(NO_SWAPPING) */