vm_pageout.c revision 120217
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_pageout.c 120217 2003-09-19 05:03:45Z alc $");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_page_free(vm_page_t);
static void vm_pageout_pmap_collect(void);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon"*/
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
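/*
 * With swapping compiled in, the same knobs are exported read-write so
 * that the process-swapout policy can be tuned at run time.
 */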
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however, the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do the
	 * reverse scan first and attempt to align our cluster,
	 * then do a forward scan if room remains.
	 */
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, TRUE));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we setup for the start of
 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(mc, count, flags, is_object_locked)
	vm_page_t *mc;
	int count;
	int flags;
	int is_object_locked;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_page_protect(mc[i], VM_PROT_READ);
	}
	object = mc[0]->object;
	vm_page_unlock_queues();
	if (!is_object_locked)
		VM_OBJECT_LOCK(object);
	vm_object_pip_add(object, count);
	VM_OBJECT_UNLOCK(object);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
	    pageout_status);

	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				pmap_page_protect(mt, VM_PROT_READ);
		}
	}
	if (!is_object_locked)
		VM_OBJECT_UNLOCK(object);
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap, first_object, desired)
	pmap_t pmap;
	vm_object_t first_object;
	long desired;
{
	vm_object_t backing_object, object;
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE || first_object->type == OBJT_PHYS)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		if (object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(pmap) <= desired) {
				vm_page_unlock_queues();
				goto unlock_return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(pmap, p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			p = next;
		}
		vm_page_unlock_queues();
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	long desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		GIANT_REQUIRED;
		vm_page_lock_queues();
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * Warning! The page queue lock is released and reacquired.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_busy(m);
	vm_page_unlock_queues();
	/*
	 * Avoid a lock order reversal.  The page must be busy.
	 */
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	pmap_remove_all(m);
	vm_page_free(m);
	VM_OBJECT_UNLOCK(object);
	cnt.v_dfree++;
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
static void
vm_pageout_pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone;

	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	vm_page_lock_queues();
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	vm_page_unlock_queues();
	pmap_pagedaemon_waken = 0;
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();
	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	vm_pageout_pmap_collect();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, as they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;
			if (!VM_OBJECT_TRYLOCK(object))
				continue;
			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				VM_OBJECT_UNLOCK(object);
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				vm_page_unlock_queues();
				VI_LOCK(vp);
				VM_OBJECT_UNLOCK(object);
				if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK |
				    LK_TIMELOCK, curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vput();  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
unlock_and_continue:
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
			VM_OBJECT_UNLOCK(object);
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate. We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					pmap_remove_all(m);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
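	 *
	 * The cache rover below steps through the PQ_CACHE queues in
	 * PQ_PRIME2-sized strides so that reclaims are spread across the
	 * page-color queues rather than draining a single one.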
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
	}
	splx(s);
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	     (swap_pager_full && vm_paging_target() > 0))) {
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * If this is a system or protected process, skip it.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    (p->p_flag & P_PROTECTED) ||
			    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it. Check all the threads individually.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
				PROC_UNLOCK(p);
				continue;
			}
			size = vmspace_swap_count(p->p_vmspace);
			vm_map_unlock_read(&p->p_vmspace->vm_map);
			size += vmspace_resident_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}

/*
 * This routine tries to maintain the pseudo LRU active queue, so that
 * during long periods of time with no paging, some statistic
 * accumulation still occurs.  This code helps the situation where
 * paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();
	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
	vm_page_unlock_queues();
	splx(s0);
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int error, pass, s;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		s = splvm();
		vm_page_lock_queues();
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				vm_page_unlock_queues();
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_page_unlock_queues();
		splx(s);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup()
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;
	int breakout;
	struct thread *td;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			mtx_unlock_spin(&sched_lock);
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out.)
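			 *
			 * A zero limit makes vm_pageout_map_deactivate_pages()
			 * try to deactivate every resident page and, when
			 * nothing in the map is wired, tear down the mappings
			 * with pmap_remove() to free the page table pages.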
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			PROC_UNLOCK(p);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif			/* !defined(NO_SWAPPING) */