vm_pageout.c revision 117038
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_pageout.c 117038 2003-06-29 19:51:24Z alc $");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_page_free(vm_page_t);
static void vm_pageout_pmap_collect(void);
static void vm_pageout_scan(int pass);
static int vm_pageout_free_page_calc(vm_size_t count);
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int numpagedout, pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED))) ||
	    !VM_OBJECT_TRYLOCK(m->object)) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do
	 * the reverse scan first and attempt to align our
	 * cluster, then do a forward scan if room remains.
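	 *
	 * A worked example of the alignment rule below (hypothetical
	 * numbers): with vm_pageout_page_count = 16 and m at pindex 37,
	 * the reverse scan stops once it has pulled in pindex 32 (a
	 * multiple of 16), so the forward scan then fills out an aligned
	 * 16-page window (32..47) instead of an arbitrary one.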
	 */
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	numpagedout = vm_pageout_flush(&mc[page_base], pageout_count, 0, TRUE);
	VM_OBJECT_UNLOCK(object);
	return (numpagedout);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(mc, count, flags, is_object_locked)
	vm_page_t *mc;
	int count;
	int flags;
	int is_object_locked;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush page %p index %d/%d: partially invalid page",
		    mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_page_protect(mc[i], VM_PROT_READ);
	}
	object = mc[0]->object;
	vm_page_unlock_queues();
	if (!is_object_locked)
		VM_OBJECT_LOCK(object);
	vm_object_pip_add(object, count);
	VM_OBJECT_UNLOCK(object);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
	    pageout_status);
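
	/*
	 * Note: writes to kernel_object were forced synchronous above
	 * (VM_PAGER_PUT_SYNC), so for those pages the VM_PAGER_PEND
	 * status below should not arise.
	 */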

	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				pmap_page_protect(mt, VM_PROT_READ);
		}
	}
	if (!is_object_locked)
		VM_OBJECT_UNLOCK(object);
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
{
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(map->pmap) <= desired) {
				vm_page_unlock_queues();
				return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			p = next;
		}
		vm_page_unlock_queues();
		object = object->backing_object;
	}
}

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		GIANT_REQUIRED;
		vm_page_lock_queues();
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * Warning! The page queue lock is released and reacquired.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_busy(m);
	vm_page_unlock_queues();
	/*
	 * Avoid a lock order reversal.  The page must be busy.
	 */
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	pmap_remove_all(m);
	vm_page_free(m);
	VM_OBJECT_UNLOCK(object);
	cnt.v_dfree++;
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
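 * It reclaims pv entries by tearing down all mappings of ordinary
 * pages; the pages themselves survive and are simply faulted back
 * in later, so only mapping state is lost.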
 */
static void
vm_pageout_pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone;

	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	vm_page_lock_queues();
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	vm_page_unlock_queues();
	pmap_pagedaemon_waken = 0;
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();
	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	vm_pageout_pmap_collect();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
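		 * (The hold count is a short-lived reference that keeps
		 * the page from being freed, typically taken around an
		 * I/O or a temporary kernel mapping.)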
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		vm_page_lock_queues();
		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system knows nothing about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			vm_page_unlock_queues();
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}
		vm_page_unlock_queues();

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_page_lock_queues();
			vm_pageout_page_free(m);
			vm_page_unlock_queues();
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_lock_queues();
			vm_page_cache(m);
			vm_page_unlock_queues();
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_lock_queues();
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
			vm_page_unlock_queues();
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
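			 *
			 * Swap-backed pageouts may be deferred
			 * (defer_swap_pageouts) or disabled outright
			 * (disable_swap_pageouts); a deferral is overridden
			 * below once the free page count falls to its
			 * minimum.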
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) {
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget(); we don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * Decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
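			 *
			 * (The marker was set up earlier with PG_MARKER and
			 * PG_FICTITIOUS set and a wire count of 1, so nothing
			 * else should free or move it while it holds our
			 * place in the queue.)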
			 */
			vm_page_lock_queues();
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			vm_page_unlock_queues();
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	vm_page_lock_queues();
	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					pmap_remove_all(m);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
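	 *
	 * The rover below walks the PQ_CACHE queues in PQ_PRIME2-sized
	 * strides so that the pages reclaimed are spread across the
	 * page-color queues rather than drained from one color.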
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
	}
	splx(s);
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if (pass != 0 &&
	    ((vm_swap_size < 64 && vm_page_count_min()) ||
	     (swap_pager_full && vm_paging_target() > 0))) {
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * If this is a system or protected process, skip it.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    (p->p_flag & P_PROTECTED) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.  Check all the threads individually.
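			 * (A thread that is neither on a run queue, running,
			 * nor sleeping is in some transient or stopped
			 * state; killing such a process may not release its
			 * memory promptly, so it is passed over.)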
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
				PROC_UNLOCK(p);
				continue;
			}
			size = vmspace_swap_count(p->p_vmspace);
			vm_map_unlock_read(&p->p_vmspace->vm_map);
			size += vmspace_resident_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}

/*
 * This routine tries to maintain the pseudo-LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistics accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();
	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
	vm_page_unlock_queues();
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}
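
/*
 * A worked example of the calculation above (hypothetical machine):
 * with cnt.v_page_count = 32768 (128MB of 4K pages), v_free_min is
 * first set to 4 + (32768 - 1024) / 200 = 162 pages, before
 * v_free_reserved is added into it and v_free_severe.
 */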

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int error, pass, s;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		s = splvm();
		vm_page_lock_queues();
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				vm_page_unlock_queues();
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_page_unlock_queues();
		splx(s);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup()
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;
	int breakout;
	struct thread *td;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * the process is swapped out -- deactivate pages
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			mtx_unlock_spin(&sched_lock);
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
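			 * (With a limit of zero, every resident page counts
			 * as over the limit, so the entire address space is
			 * deactivated below.)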
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			PROC_UNLOCK(p);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif			/* !defined(NO_SWAPPING) */