vm_pageout.c revision 107136
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 107136 2002-11-21 09:17:56Z jeff $
 */

/*
 * The proverbial page-out daemon.
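 *
 * In outline (see the routines below): vm_pageout_scan() walks the
 * inactive queue, freeing or caching clean pages and laundering dirty
 * ones, then trims the active queue; vm_pageout() is the daemon's main
 * loop; the optional vm_daemon() handles whole-process swapout; and,
 * when swap is exhausted, the largest eligible process is killed.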
73 */ 74 75#include "opt_vm.h" 76#include <sys/param.h> 77#include <sys/systm.h> 78#include <sys/kernel.h> 79#include <sys/eventhandler.h> 80#include <sys/lock.h> 81#include <sys/mutex.h> 82#include <sys/proc.h> 83#include <sys/kthread.h> 84#include <sys/ktr.h> 85#include <sys/resourcevar.h> 86#include <sys/sched.h> 87#include <sys/signalvar.h> 88#include <sys/vnode.h> 89#include <sys/vmmeter.h> 90#include <sys/sx.h> 91#include <sys/sysctl.h> 92 93#include <vm/vm.h> 94#include <vm/vm_param.h> 95#include <vm/vm_object.h> 96#include <vm/vm_page.h> 97#include <vm/vm_map.h> 98#include <vm/vm_pageout.h> 99#include <vm/vm_pager.h> 100#include <vm/swap_pager.h> 101#include <vm/vm_extern.h> 102#include <vm/uma.h> 103 104#include <machine/mutex.h> 105 106/* 107 * System initialization 108 */ 109 110/* the kernel process "vm_pageout"*/ 111static void vm_pageout(void); 112static int vm_pageout_clean(vm_page_t); 113static void vm_pageout_pmap_collect(void); 114static void vm_pageout_scan(int pass); 115static int vm_pageout_free_page_calc(vm_size_t count); 116struct proc *pageproc; 117 118static struct kproc_desc page_kp = { 119 "pagedaemon", 120 vm_pageout, 121 &pageproc 122}; 123SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp) 124 125#if !defined(NO_SWAPPING) 126/* the kernel process "vm_daemon"*/ 127static void vm_daemon(void); 128static struct proc *vmproc; 129 130static struct kproc_desc vm_kp = { 131 "vmdaemon", 132 vm_daemon, 133 &vmproc 134}; 135SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp) 136#endif 137 138 139int vm_pages_needed=0; /* Event on which pageout daemon sleeps */ 140int vm_pageout_deficit=0; /* Estimated number of pages deficit */ 141int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */ 142 143#if !defined(NO_SWAPPING) 144static int vm_pageout_req_swapout; /* XXX */ 145static int vm_daemon_needed; 146#endif 147extern int vm_swap_size; 148static int vm_max_launder = 32; 149static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0; 150static int vm_pageout_full_stats_interval = 0; 151static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0; 152static int defer_swap_pageouts=0; 153static int disable_swap_pageouts=0; 154 155#if defined(NO_SWAPPING) 156static int vm_swap_enabled=0; 157static int vm_swap_idle_enabled=0; 158#else 159static int vm_swap_enabled=1; 160static int vm_swap_idle_enabled=0; 161#endif 162 163SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm, 164 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt"); 165 166SYSCTL_INT(_vm, OID_AUTO, max_launder, 167 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout"); 168 169SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max, 170 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length"); 171 172SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval, 173 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan"); 174 175SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval, 176 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan"); 177 178SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max, 179 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented"); 180 181#if defined(NO_SWAPPING) 182SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, 183 CTLFLAG_RD, &vm_swap_enabled, 0, ""); 184SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, 185 CTLFLAG_RD, &vm_swap_idle_enabled, 0, ""); 186#else 187SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, 188 CTLFLAG_RW, &vm_swap_enabled, 
    0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
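	 *
	 * For illustration (assuming every neighboring page is dirty,
	 * inactive and unbusied): with vm_pageout_page_count == 16 and a
	 * starting pindex of 37, the reverse scan collects pages 36 down
	 * to 32 and stops at the 16-page boundary; the forward scan can
	 * then extend the cluster through page 47, yielding an aligned
	 * 16-page run.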
280 */ 281more: 282 while (ib && pageout_count < vm_pageout_page_count) { 283 vm_page_t p; 284 285 if (ib > pindex) { 286 ib = 0; 287 break; 288 } 289 290 if ((p = vm_page_lookup(object, pindex - ib)) == NULL) { 291 ib = 0; 292 break; 293 } 294 if (((p->queue - p->pc) == PQ_CACHE) || 295 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { 296 ib = 0; 297 break; 298 } 299 vm_page_test_dirty(p); 300 if ((p->dirty & p->valid) == 0 || 301 p->queue != PQ_INACTIVE || 302 p->wire_count != 0 || /* may be held by buf cache */ 303 p->hold_count != 0) { /* may be undergoing I/O */ 304 ib = 0; 305 break; 306 } 307 mc[--page_base] = p; 308 ++pageout_count; 309 ++ib; 310 /* 311 * alignment boundry, stop here and switch directions. Do 312 * not clear ib. 313 */ 314 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0) 315 break; 316 } 317 318 while (pageout_count < vm_pageout_page_count && 319 pindex + is < object->size) { 320 vm_page_t p; 321 322 if ((p = vm_page_lookup(object, pindex + is)) == NULL) 323 break; 324 if (((p->queue - p->pc) == PQ_CACHE) || 325 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { 326 break; 327 } 328 vm_page_test_dirty(p); 329 if ((p->dirty & p->valid) == 0 || 330 p->queue != PQ_INACTIVE || 331 p->wire_count != 0 || /* may be held by buf cache */ 332 p->hold_count != 0) { /* may be undergoing I/O */ 333 break; 334 } 335 mc[page_base + pageout_count] = p; 336 ++pageout_count; 337 ++is; 338 } 339 340 /* 341 * If we exhausted our forward scan, continue with the reverse scan 342 * when possible, even past a page boundry. This catches boundry 343 * conditions. 344 */ 345 if (ib && pageout_count < vm_pageout_page_count) 346 goto more; 347 348 /* 349 * we allow reads during pageouts... 350 */ 351 return vm_pageout_flush(&mc[page_base], pageout_count, 0); 352} 353 354/* 355 * vm_pageout_flush() - launder the given pages 356 * 357 * The given pages are laundered. Note that we setup for the start of 358 * I/O ( i.e. busy the page ), mark it read-only, and bump the object 359 * reference count all in here rather then in the parent. If we want 360 * the parent to do more sophisticated things we may have to change 361 * the ordering. 362 */ 363int 364vm_pageout_flush(mc, count, flags) 365 vm_page_t *mc; 366 int count; 367 int flags; 368{ 369 vm_object_t object; 370 int pageout_status[count]; 371 int numpagedout = 0; 372 int i; 373 374 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 375 /* 376 * Initiate I/O. Bump the vm_page_t->busy counter and 377 * mark the pages read-only. 378 * 379 * We do not have to fixup the clean/dirty bits here... we can 380 * allow the pager to do it after the I/O completes. 381 * 382 * NOTE! mc[i]->dirty may be partial or fragmented due to an 383 * edge case with file fragments. 384 */ 385 for (i = 0; i < count; i++) { 386 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count)); 387 vm_page_io_start(mc[i]); 388 pmap_page_protect(mc[i], VM_PROT_READ); 389 } 390 object = mc[0]->object; 391 vm_page_unlock_queues(); 392 vm_object_pip_add(object, count); 393 394 vm_pager_put_pages(object, mc, count, 395 (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)), 396 pageout_status); 397 398 vm_page_lock_queues(); 399 for (i = 0; i < count; i++) { 400 vm_page_t mt = mc[i]; 401 402 switch (pageout_status[i]) { 403 case VM_PAGER_OK: 404 numpagedout++; 405 break; 406 case VM_PAGER_PEND: 407 numpagedout++; 408 break; 409 case VM_PAGER_BAD: 410 /* 411 * Page outside of range of object. 
Right now we 412 * essentially lose the changes by pretending it 413 * worked. 414 */ 415 pmap_clear_modify(mt); 416 vm_page_undirty(mt); 417 break; 418 case VM_PAGER_ERROR: 419 case VM_PAGER_FAIL: 420 /* 421 * If page couldn't be paged out, then reactivate the 422 * page so it doesn't clog the inactive list. (We 423 * will try paging out it again later). 424 */ 425 vm_page_activate(mt); 426 break; 427 case VM_PAGER_AGAIN: 428 break; 429 } 430 431 /* 432 * If the operation is still going, leave the page busy to 433 * block all other accesses. Also, leave the paging in 434 * progress indicator set so that we don't attempt an object 435 * collapse. 436 */ 437 if (pageout_status[i] != VM_PAGER_PEND) { 438 vm_object_pip_wakeup(object); 439 vm_page_io_finish(mt); 440 if (!vm_page_count_severe() || !vm_page_try_to_cache(mt)) 441 pmap_page_protect(mt, VM_PROT_READ); 442 } 443 } 444 return numpagedout; 445} 446 447#if !defined(NO_SWAPPING) 448/* 449 * vm_pageout_object_deactivate_pages 450 * 451 * deactivate enough pages to satisfy the inactive target 452 * requirements or if vm_page_proc_limit is set, then 453 * deactivate all of the pages in the object and its 454 * backing_objects. 455 * 456 * The object and map must be locked. 457 */ 458static void 459vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only) 460 vm_map_t map; 461 vm_object_t object; 462 vm_pindex_t desired; 463 int map_remove_only; 464{ 465 vm_page_t p, next; 466 int actcount, rcount, remove_mode; 467 468 GIANT_REQUIRED; 469 if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS) 470 return; 471 472 while (object) { 473 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 474 return; 475 if (object->paging_in_progress) 476 return; 477 478 remove_mode = map_remove_only; 479 if (object->shadow_count > 1) 480 remove_mode = 1; 481 /* 482 * scan the objects entire memory queue 483 */ 484 rcount = object->resident_page_count; 485 p = TAILQ_FIRST(&object->memq); 486 vm_page_lock_queues(); 487 while (p && (rcount-- > 0)) { 488 if (pmap_resident_count(map->pmap) <= desired) { 489 vm_page_unlock_queues(); 490 return; 491 } 492 next = TAILQ_NEXT(p, listq); 493 cnt.v_pdpages++; 494 if (p->wire_count != 0 || 495 p->hold_count != 0 || 496 p->busy != 0 || 497 (p->flags & (PG_BUSY|PG_UNMANAGED)) || 498 !pmap_page_exists_quick(vm_map_pmap(map), p)) { 499 p = next; 500 continue; 501 } 502 actcount = pmap_ts_referenced(p); 503 if (actcount) { 504 vm_page_flag_set(p, PG_REFERENCED); 505 } else if (p->flags & PG_REFERENCED) { 506 actcount = 1; 507 } 508 if ((p->queue != PQ_ACTIVE) && 509 (p->flags & PG_REFERENCED)) { 510 vm_page_activate(p); 511 p->act_count += actcount; 512 vm_page_flag_clear(p, PG_REFERENCED); 513 } else if (p->queue == PQ_ACTIVE) { 514 if ((p->flags & PG_REFERENCED) == 0) { 515 p->act_count -= min(p->act_count, ACT_DECLINE); 516 if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) { 517 pmap_remove_all(p); 518 vm_page_deactivate(p); 519 } else { 520 vm_pageq_requeue(p); 521 } 522 } else { 523 vm_page_activate(p); 524 vm_page_flag_clear(p, PG_REFERENCED); 525 if (p->act_count < (ACT_MAX - ACT_ADVANCE)) 526 p->act_count += ACT_ADVANCE; 527 vm_pageq_requeue(p); 528 } 529 } else if (p->queue == PQ_INACTIVE) { 530 pmap_remove_all(p); 531 } 532 p = next; 533 } 534 vm_page_unlock_queues(); 535 object = object->backing_object; 536 } 537} 538 539/* 540 * deactivate some number of pages in a map, try to do it fairly, but 541 * that is really hard to do. 
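 *
 * The heuristic below first targets the largest resident object that has
 * at most one shadow, then walks the remaining map entries; if nothing in
 * the map is wired and the target is zero, all mappings are removed so
 * the page table pages themselves can be freed.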
542 */ 543static void 544vm_pageout_map_deactivate_pages(map, desired) 545 vm_map_t map; 546 vm_pindex_t desired; 547{ 548 vm_map_entry_t tmpe; 549 vm_object_t obj, bigobj; 550 int nothingwired; 551 552 GIANT_REQUIRED; 553 if (!vm_map_trylock(map)) 554 return; 555 556 bigobj = NULL; 557 nothingwired = TRUE; 558 559 /* 560 * first, search out the biggest object, and try to free pages from 561 * that. 562 */ 563 tmpe = map->header.next; 564 while (tmpe != &map->header) { 565 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 566 obj = tmpe->object.vm_object; 567 if ((obj != NULL) && (obj->shadow_count <= 1) && 568 ((bigobj == NULL) || 569 (bigobj->resident_page_count < obj->resident_page_count))) { 570 bigobj = obj; 571 } 572 } 573 if (tmpe->wired_count > 0) 574 nothingwired = FALSE; 575 tmpe = tmpe->next; 576 } 577 578 if (bigobj) 579 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0); 580 581 /* 582 * Next, hunt around for other pages to deactivate. We actually 583 * do this search sort of wrong -- .text first is not the best idea. 584 */ 585 tmpe = map->header.next; 586 while (tmpe != &map->header) { 587 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 588 break; 589 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 590 obj = tmpe->object.vm_object; 591 if (obj) 592 vm_pageout_object_deactivate_pages(map, obj, desired, 0); 593 } 594 tmpe = tmpe->next; 595 }; 596 597 /* 598 * Remove all mappings if a process is swapped out, this will free page 599 * table pages. 600 */ 601 if (desired == 0 && nothingwired) 602 pmap_remove(vm_map_pmap(map), vm_map_min(map), 603 vm_map_max(map)); 604 vm_map_unlock(map); 605 return; 606} 607#endif /* !defined(NO_SWAPPING) */ 608 609/* 610 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore 611 * to vnode deadlocks. We only do it for OBJT_DEFAULT and OBJT_SWAP objects 612 * which we know can be trivially freed. 613 */ 614void 615vm_pageout_page_free(vm_page_t m) { 616 vm_object_t object = m->object; 617 int type = object->type; 618 619 GIANT_REQUIRED; 620 if (type == OBJT_SWAP || type == OBJT_DEFAULT) 621 vm_object_reference(object); 622 vm_page_busy(m); 623 pmap_remove_all(m); 624 vm_page_free(m); 625 cnt.v_dfree++; 626 if (type == OBJT_SWAP || type == OBJT_DEFAULT) 627 vm_object_deallocate(object); 628} 629 630/* 631 * This routine is very drastic, but can save the system 632 * in a pinch. 633 */ 634static void 635vm_pageout_pmap_collect(void) 636{ 637 int i; 638 vm_page_t m; 639 static int warningdone; 640 641 if (pmap_pagedaemon_waken == 0) 642 return; 643 if (warningdone < 5) { 644 printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n"); 645 warningdone++; 646 } 647 vm_page_lock_queues(); 648 for (i = 0; i < vm_page_array_size; i++) { 649 m = &vm_page_array[i]; 650 if (m->wire_count || m->hold_count || m->busy || 651 (m->flags & (PG_BUSY | PG_UNMANAGED))) 652 continue; 653 pmap_remove_all(m); 654 } 655 vm_page_unlock_queues(); 656 pmap_pagedaemon_waken = 0; 657} 658 659/* 660 * vm_pageout_scan does the dirty work for the pageout daemon. 
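 *
 * One pass: notify vm_lowmem consumers and drain UMA, reclaim pv entries
 * if the pmap layer asked for it, scan the inactive queue (freeing,
 * caching, or laundering up to maxlaunder dirty pages), scan the active
 * queue for deactivation candidates, top up the free list from the cache
 * queues, and, if swap is exhausted, pick the largest runnable process
 * to kill.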
661 */ 662static void 663vm_pageout_scan(int pass) 664{ 665 vm_page_t m, next; 666 struct vm_page marker; 667 int save_page_shortage; 668 int save_inactive_count; 669 int page_shortage, maxscan, pcount; 670 int addl_page_shortage, addl_page_shortage_init; 671 struct proc *p, *bigproc; 672 vm_offset_t size, bigsize; 673 vm_object_t object; 674 int actcount; 675 int vnodes_skipped = 0; 676 int maxlaunder; 677 int s; 678 struct thread *td; 679 680 GIANT_REQUIRED; 681 /* 682 * Decrease registered cache sizes. 683 */ 684 EVENTHANDLER_INVOKE(vm_lowmem, 0); 685 /* 686 * We do this explicitly after the caches have been drained above. 687 */ 688 uma_reclaim(); 689 /* 690 * Do whatever cleanup that the pmap code can. 691 */ 692 vm_pageout_pmap_collect(); 693 694 addl_page_shortage_init = vm_pageout_deficit; 695 vm_pageout_deficit = 0; 696 697 /* 698 * Calculate the number of pages we want to either free or move 699 * to the cache. 700 */ 701 page_shortage = vm_paging_target() + addl_page_shortage_init; 702 save_page_shortage = page_shortage; 703 save_inactive_count = cnt.v_inactive_count; 704 705 /* 706 * Initialize our marker 707 */ 708 bzero(&marker, sizeof(marker)); 709 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER; 710 marker.queue = PQ_INACTIVE; 711 marker.wire_count = 1; 712 713 /* 714 * Start scanning the inactive queue for pages we can move to the 715 * cache or free. The scan will stop when the target is reached or 716 * we have scanned the entire inactive queue. Note that m->act_count 717 * is not used to form decisions for the inactive queue, only for the 718 * active queue. 719 * 720 * maxlaunder limits the number of dirty pages we flush per scan. 721 * For most systems a smaller value (16 or 32) is more robust under 722 * extreme memory and disk pressure because any unnecessary writes 723 * to disk can result in extreme performance degredation. However, 724 * systems with excessive dirty pages (especially when MAP_NOSYNC is 725 * used) will die horribly with limited laundering. If the pageout 726 * daemon cannot clean enough pages in the first pass, we let it go 727 * all out in succeeding passes. 728 */ 729 if ((maxlaunder = vm_max_launder) <= 1) 730 maxlaunder = 1; 731 if (pass) 732 maxlaunder = 10000; 733rescan0: 734 addl_page_shortage = addl_page_shortage_init; 735 maxscan = cnt.v_inactive_count; 736 737 for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl); 738 m != NULL && maxscan-- > 0 && page_shortage > 0; 739 m = next) { 740 741 cnt.v_pdpages++; 742 743 if (m->queue != PQ_INACTIVE) { 744 goto rescan0; 745 } 746 747 next = TAILQ_NEXT(m, pageq); 748 749 /* 750 * skip marker pages 751 */ 752 if (m->flags & PG_MARKER) 753 continue; 754 755 /* 756 * A held page may be undergoing I/O, so skip it. 757 */ 758 if (m->hold_count) { 759 vm_pageq_requeue(m); 760 addl_page_shortage++; 761 continue; 762 } 763 /* 764 * Don't mess with busy pages, keep in the front of the 765 * queue, most likely are being paged out. 766 */ 767 if (m->busy || (m->flags & PG_BUSY)) { 768 addl_page_shortage++; 769 continue; 770 } 771 772 /* 773 * If the object is not being used, we ignore previous 774 * references. 775 */ 776 if (m->object->ref_count == 0) { 777 vm_page_flag_clear(m, PG_REFERENCED); 778 pmap_clear_reference(m); 779 780 /* 781 * Otherwise, if the page has been referenced while in the 782 * inactive queue, we bump the "activation count" upwards, 783 * making it less likely that the page will be added back to 784 * the inactive queue prematurely again. 
Here we check the 785 * page tables (or emulated bits, if any), given the upper 786 * level VM system not knowing anything about existing 787 * references. 788 */ 789 } else if (((m->flags & PG_REFERENCED) == 0) && 790 (actcount = pmap_ts_referenced(m))) { 791 vm_page_lock_queues(); 792 vm_page_activate(m); 793 vm_page_unlock_queues(); 794 m->act_count += (actcount + ACT_ADVANCE); 795 continue; 796 } 797 798 /* 799 * If the upper level VM system knows about any page 800 * references, we activate the page. We also set the 801 * "activation count" higher than normal so that we will less 802 * likely place pages back onto the inactive queue again. 803 */ 804 if ((m->flags & PG_REFERENCED) != 0) { 805 vm_page_flag_clear(m, PG_REFERENCED); 806 actcount = pmap_ts_referenced(m); 807 vm_page_lock_queues(); 808 vm_page_activate(m); 809 vm_page_unlock_queues(); 810 m->act_count += (actcount + ACT_ADVANCE + 1); 811 continue; 812 } 813 814 /* 815 * If the upper level VM system doesn't know anything about 816 * the page being dirty, we have to check for it again. As 817 * far as the VM code knows, any partially dirty pages are 818 * fully dirty. 819 */ 820 if (m->dirty == 0) { 821 vm_page_test_dirty(m); 822 } else { 823 vm_page_dirty(m); 824 } 825 826 /* 827 * Invalid pages can be easily freed 828 */ 829 if (m->valid == 0) { 830 vm_page_lock_queues(); 831 vm_pageout_page_free(m); 832 vm_page_unlock_queues(); 833 --page_shortage; 834 835 /* 836 * Clean pages can be placed onto the cache queue. This 837 * effectively frees them. 838 */ 839 } else if (m->dirty == 0) { 840 vm_page_lock_queues(); 841 vm_page_cache(m); 842 vm_page_unlock_queues(); 843 --page_shortage; 844 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) { 845 /* 846 * Dirty pages need to be paged out, but flushing 847 * a page is extremely expensive verses freeing 848 * a clean page. Rather then artificially limiting 849 * the number of pages we can flush, we instead give 850 * dirty pages extra priority on the inactive queue 851 * by forcing them to be cycled through the queue 852 * twice before being flushed, after which the 853 * (now clean) page will cycle through once more 854 * before being freed. This significantly extends 855 * the thrash point for a heavily loaded machine. 856 */ 857 vm_page_flag_set(m, PG_WINATCFLS); 858 vm_pageq_requeue(m); 859 } else if (maxlaunder > 0) { 860 /* 861 * We always want to try to flush some dirty pages if 862 * we encounter them, to keep the system stable. 863 * Normally this number is small, but under extreme 864 * pressure where there are insufficient clean pages 865 * on the inactive queue, we may have to go all out. 866 */ 867 int swap_pageouts_ok; 868 struct vnode *vp = NULL; 869 struct mount *mp; 870 871 object = m->object; 872 873 if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) { 874 swap_pageouts_ok = 1; 875 } else { 876 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts); 877 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts && 878 vm_page_count_min()); 879 880 } 881 882 /* 883 * We don't bother paging objects that are "dead". 884 * Those objects are in a "rundown" state. 885 */ 886 if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) { 887 vm_pageq_requeue(m); 888 continue; 889 } 890 891 /* 892 * The object is already known NOT to be dead. It 893 * is possible for the vget() to block the whole 894 * pageout daemon, but the new low-memory handling 895 * code should prevent it. 
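			 * (vget() is attempted with LK_TIMELOCK so a
			 * contended vnode lock gives up after a bounded
			 * wait; each such failure is counted in the
			 * vm.pageout_lock_miss sysctl and the page is left
			 * for a later pass.)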
896 * 897 * The previous code skipped locked vnodes and, worse, 898 * reordered pages in the queue. This results in 899 * completely non-deterministic operation and, on a 900 * busy system, can lead to extremely non-optimal 901 * pageouts. For example, it can cause clean pages 902 * to be freed and dirty pages to be moved to the end 903 * of the queue. Since dirty pages are also moved to 904 * the end of the queue once-cleaned, this gives 905 * way too large a weighting to defering the freeing 906 * of dirty pages. 907 * 908 * We can't wait forever for the vnode lock, we might 909 * deadlock due to a vn_read() getting stuck in 910 * vm_wait while holding this vnode. We skip the 911 * vnode if we can't get it in a reasonable amount 912 * of time. 913 */ 914 if (object->type == OBJT_VNODE) { 915 vp = object->handle; 916 917 mp = NULL; 918 if (vp->v_type == VREG) 919 vn_start_write(vp, &mp, V_NOWAIT); 920 if (vget(vp, LK_EXCLUSIVE|LK_TIMELOCK, curthread)) { 921 ++pageout_lock_miss; 922 vn_finished_write(mp); 923 if (object->flags & OBJ_MIGHTBEDIRTY) 924 vnodes_skipped++; 925 continue; 926 } 927 928 /* 929 * The page might have been moved to another 930 * queue during potential blocking in vget() 931 * above. The page might have been freed and 932 * reused for another vnode. The object might 933 * have been reused for another vnode. 934 */ 935 if (m->queue != PQ_INACTIVE || 936 m->object != object || 937 object->handle != vp) { 938 if (object->flags & OBJ_MIGHTBEDIRTY) 939 vnodes_skipped++; 940 vput(vp); 941 vn_finished_write(mp); 942 continue; 943 } 944 945 /* 946 * The page may have been busied during the 947 * blocking in vput(); We don't move the 948 * page back onto the end of the queue so that 949 * statistics are more correct if we don't. 950 */ 951 if (m->busy || (m->flags & PG_BUSY)) { 952 vput(vp); 953 vn_finished_write(mp); 954 continue; 955 } 956 957 /* 958 * If the page has become held it might 959 * be undergoing I/O, so skip it 960 */ 961 if (m->hold_count) { 962 vm_pageq_requeue(m); 963 if (object->flags & OBJ_MIGHTBEDIRTY) 964 vnodes_skipped++; 965 vput(vp); 966 vn_finished_write(mp); 967 continue; 968 } 969 } 970 971 /* 972 * If a page is dirty, then it is either being washed 973 * (but not yet cleaned) or it is still in the 974 * laundry. If it is still in the laundry, then we 975 * start the cleaning operation. 976 * 977 * This operation may cluster, invalidating the 'next' 978 * pointer. To prevent an inordinate number of 979 * restarts we use our marker to remember our place. 980 * 981 * decrement page_shortage on success to account for 982 * the (future) cleaned page. Otherwise we could wind 983 * up laundering or cleaning too many pages. 984 */ 985 vm_page_lock_queues(); 986 s = splvm(); 987 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq); 988 splx(s); 989 if (vm_pageout_clean(m) != 0) { 990 --page_shortage; 991 --maxlaunder; 992 } 993 s = splvm(); 994 next = TAILQ_NEXT(&marker, pageq); 995 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq); 996 splx(s); 997 vm_page_unlock_queues(); 998 if (vp) { 999 vput(vp); 1000 vn_finished_write(mp); 1001 } 1002 } 1003 } 1004 1005 /* 1006 * Compute the number of pages we want to try to move from the 1007 * active queue to the inactive queue. 1008 */ 1009 page_shortage = vm_paging_target() + 1010 cnt.v_inactive_target - cnt.v_inactive_count; 1011 page_shortage += addl_page_shortage; 1012 1013 vm_page_lock_queues(); 1014 /* 1015 * Scan the active queue for things we can deactivate. 
We nominally 1016 * track the per-page activity counter and use it to locate 1017 * deactivation candidates. 1018 */ 1019 pcount = cnt.v_active_count; 1020 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); 1021 1022 while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) { 1023 1024 /* 1025 * This is a consistency check, and should likely be a panic 1026 * or warning. 1027 */ 1028 if (m->queue != PQ_ACTIVE) { 1029 break; 1030 } 1031 1032 next = TAILQ_NEXT(m, pageq); 1033 /* 1034 * Don't deactivate pages that are busy. 1035 */ 1036 if ((m->busy != 0) || 1037 (m->flags & PG_BUSY) || 1038 (m->hold_count != 0)) { 1039 vm_pageq_requeue(m); 1040 m = next; 1041 continue; 1042 } 1043 1044 /* 1045 * The count for pagedaemon pages is done after checking the 1046 * page for eligibility... 1047 */ 1048 cnt.v_pdpages++; 1049 1050 /* 1051 * Check to see "how much" the page has been used. 1052 */ 1053 actcount = 0; 1054 if (m->object->ref_count != 0) { 1055 if (m->flags & PG_REFERENCED) { 1056 actcount += 1; 1057 } 1058 actcount += pmap_ts_referenced(m); 1059 if (actcount) { 1060 m->act_count += ACT_ADVANCE + actcount; 1061 if (m->act_count > ACT_MAX) 1062 m->act_count = ACT_MAX; 1063 } 1064 } 1065 1066 /* 1067 * Since we have "tested" this bit, we need to clear it now. 1068 */ 1069 vm_page_flag_clear(m, PG_REFERENCED); 1070 1071 /* 1072 * Only if an object is currently being used, do we use the 1073 * page activation count stats. 1074 */ 1075 if (actcount && (m->object->ref_count != 0)) { 1076 vm_pageq_requeue(m); 1077 } else { 1078 m->act_count -= min(m->act_count, ACT_DECLINE); 1079 if (vm_pageout_algorithm || 1080 m->object->ref_count == 0 || 1081 m->act_count == 0) { 1082 page_shortage--; 1083 if (m->object->ref_count == 0) { 1084 pmap_remove_all(m); 1085 if (m->dirty == 0) 1086 vm_page_cache(m); 1087 else 1088 vm_page_deactivate(m); 1089 } else { 1090 vm_page_deactivate(m); 1091 } 1092 } else { 1093 vm_pageq_requeue(m); 1094 } 1095 } 1096 m = next; 1097 } 1098 s = splvm(); 1099 1100 /* 1101 * We try to maintain some *really* free pages, this allows interrupt 1102 * code to be guaranteed space. Since both cache and free queues 1103 * are considered basically 'free', moving pages from cache to free 1104 * does not effect other calculations. 1105 */ 1106 while (cnt.v_free_count < cnt.v_free_reserved) { 1107 static int cache_rover = 0; 1108 m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE); 1109 if (!m) 1110 break; 1111 if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || 1112 m->busy || 1113 m->hold_count || 1114 m->wire_count) { 1115#ifdef INVARIANTS 1116 printf("Warning: busy page %p found in cache\n", m); 1117#endif 1118 vm_page_deactivate(m); 1119 continue; 1120 } 1121 cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK; 1122 vm_pageout_page_free(m); 1123 } 1124 splx(s); 1125 vm_page_unlock_queues(); 1126#if !defined(NO_SWAPPING) 1127 /* 1128 * Idle process swapout -- run once per second. 1129 */ 1130 if (vm_swap_idle_enabled) { 1131 static long lsec; 1132 if (time_second != lsec) { 1133 vm_pageout_req_swapout |= VM_SWAP_IDLE; 1134 vm_req_vmdaemon(); 1135 lsec = time_second; 1136 } 1137 } 1138#endif 1139 1140 /* 1141 * If we didn't get enough free pages, and we have skipped a vnode 1142 * in a writeable object, wakeup the sync daemon. And kick swapout 1143 * if we did not get enough free pages. 
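	 *
	 * (speedup_syncer() only nudges the syncer to flush dirty vnodes
	 * sooner; the actual swapout of whole processes is left to the
	 * vmdaemon woken via vm_req_vmdaemon().)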
 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for its
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * If this is a system process, skip it.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.  Check all the threads individually.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * Get the process size.
			 */
			size = vmspace_resident_count(p->p_vmspace) +
				vmspace_swap_count(p->p_vmspace);
			/*
			 * If this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
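 *
 * Each run normally inspects only a slice of the active queue (derived
 * from vm_pageout_stats_max, scaled by the fraction of pages that are
 * active); every vm_pageout_full_stats_interval seconds the entire queue
 * is walked instead.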
1247 */ 1248static void 1249vm_pageout_page_stats() 1250{ 1251 vm_page_t m,next; 1252 int pcount,tpcount; /* Number of pages to check */ 1253 static int fullintervalcount = 0; 1254 int page_shortage; 1255 int s0; 1256 1257 page_shortage = 1258 (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) - 1259 (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count); 1260 1261 if (page_shortage <= 0) 1262 return; 1263 1264 s0 = splvm(); 1265 vm_page_lock_queues(); 1266 pcount = cnt.v_active_count; 1267 fullintervalcount += vm_pageout_stats_interval; 1268 if (fullintervalcount < vm_pageout_full_stats_interval) { 1269 tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count; 1270 if (pcount > tpcount) 1271 pcount = tpcount; 1272 } else { 1273 fullintervalcount = 0; 1274 } 1275 1276 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); 1277 while ((m != NULL) && (pcount-- > 0)) { 1278 int actcount; 1279 1280 if (m->queue != PQ_ACTIVE) { 1281 break; 1282 } 1283 1284 next = TAILQ_NEXT(m, pageq); 1285 /* 1286 * Don't deactivate pages that are busy. 1287 */ 1288 if ((m->busy != 0) || 1289 (m->flags & PG_BUSY) || 1290 (m->hold_count != 0)) { 1291 vm_pageq_requeue(m); 1292 m = next; 1293 continue; 1294 } 1295 1296 actcount = 0; 1297 if (m->flags & PG_REFERENCED) { 1298 vm_page_flag_clear(m, PG_REFERENCED); 1299 actcount += 1; 1300 } 1301 1302 actcount += pmap_ts_referenced(m); 1303 if (actcount) { 1304 m->act_count += ACT_ADVANCE + actcount; 1305 if (m->act_count > ACT_MAX) 1306 m->act_count = ACT_MAX; 1307 vm_pageq_requeue(m); 1308 } else { 1309 if (m->act_count == 0) { 1310 /* 1311 * We turn off page access, so that we have 1312 * more accurate RSS stats. We don't do this 1313 * in the normal page deactivation when the 1314 * system is loaded VM wise, because the 1315 * cost of the large number of page protect 1316 * operations would be higher than the value 1317 * of doing the operation. 1318 */ 1319 pmap_remove_all(m); 1320 vm_page_deactivate(m); 1321 } else { 1322 m->act_count -= min(m->act_count, ACT_DECLINE); 1323 vm_pageq_requeue(m); 1324 } 1325 } 1326 1327 m = next; 1328 } 1329 vm_page_unlock_queues(); 1330 splx(s0); 1331} 1332 1333static int 1334vm_pageout_free_page_calc(count) 1335vm_size_t count; 1336{ 1337 if (count < cnt.v_page_count) 1338 return 0; 1339 /* 1340 * free_reserved needs to include enough for the largest swap pager 1341 * structures plus enough for any pv_entry structs when paging. 1342 */ 1343 if (cnt.v_page_count > 1024) 1344 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200; 1345 else 1346 cnt.v_free_min = 4; 1347 cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 1348 cnt.v_interrupt_free_min; 1349 cnt.v_free_reserved = vm_pageout_page_count + 1350 cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE; 1351 cnt.v_free_severe = cnt.v_free_min / 2; 1352 cnt.v_free_min += cnt.v_free_reserved; 1353 cnt.v_free_severe += cnt.v_free_reserved; 1354 return 1; 1355} 1356 1357/* 1358 * vm_pageout is the high level pageout daemon. 1359 */ 1360static void 1361vm_pageout() 1362{ 1363 int pass; 1364 1365 mtx_lock(&Giant); 1366 1367 /* 1368 * Initialize some paging parameters. 1369 */ 1370 cnt.v_interrupt_free_min = 2; 1371 if (cnt.v_page_count < 2000) 1372 vm_pageout_page_count = 8; 1373 1374 vm_pageout_free_page_calc(cnt.v_page_count); 1375 /* 1376 * v_free_target and v_cache_min control pageout hysteresis. Note 1377 * that these are more a measure of the VM cache queue hysteresis 1378 * then the VM free queue. 
Specifically, v_free_target is the 1379 * high water mark (free+cache pages). 1380 * 1381 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the 1382 * low water mark, while v_free_min is the stop. v_cache_min must 1383 * be big enough to handle memory needs while the pageout daemon 1384 * is signalled and run to free more pages. 1385 */ 1386 if (cnt.v_free_count > 6144) 1387 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved; 1388 else 1389 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved; 1390 1391 if (cnt.v_free_count > 2048) { 1392 cnt.v_cache_min = cnt.v_free_target; 1393 cnt.v_cache_max = 2 * cnt.v_cache_min; 1394 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2; 1395 } else { 1396 cnt.v_cache_min = 0; 1397 cnt.v_cache_max = 0; 1398 cnt.v_inactive_target = cnt.v_free_count / 4; 1399 } 1400 if (cnt.v_inactive_target > cnt.v_free_count / 3) 1401 cnt.v_inactive_target = cnt.v_free_count / 3; 1402 1403 /* XXX does not really belong here */ 1404 if (vm_page_max_wired == 0) 1405 vm_page_max_wired = cnt.v_free_count / 3; 1406 1407 if (vm_pageout_stats_max == 0) 1408 vm_pageout_stats_max = cnt.v_free_target; 1409 1410 /* 1411 * Set interval in seconds for stats scan. 1412 */ 1413 if (vm_pageout_stats_interval == 0) 1414 vm_pageout_stats_interval = 5; 1415 if (vm_pageout_full_stats_interval == 0) 1416 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4; 1417 1418 /* 1419 * Set maximum free per pass 1420 */ 1421 if (vm_pageout_stats_free_max == 0) 1422 vm_pageout_stats_free_max = 5; 1423 1424 swap_pager_swap_init(); 1425 pass = 0; 1426 /* 1427 * The pageout daemon is never done, so loop forever. 1428 */ 1429 while (TRUE) { 1430 int error; 1431 int s = splvm(); 1432 1433 /* 1434 * If we have enough free memory, wakeup waiters. Do 1435 * not clear vm_pages_needed until we reach our target, 1436 * otherwise we may be woken up over and over again and 1437 * waste a lot of cpu. 1438 */ 1439 if (vm_pages_needed && !vm_page_count_min()) { 1440 if (vm_paging_needed() <= 0) 1441 vm_pages_needed = 0; 1442 wakeup(&cnt.v_free_count); 1443 } 1444 if (vm_pages_needed) { 1445 /* 1446 * Still not done, take a second pass without waiting 1447 * (unlimited dirty cleaning), otherwise sleep a bit 1448 * and try again. 1449 */ 1450 ++pass; 1451 if (pass > 1) 1452 tsleep(&vm_pages_needed, PVM, 1453 "psleep", hz/2); 1454 } else { 1455 /* 1456 * Good enough, sleep & handle stats. Prime the pass 1457 * for the next run. 
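			 *
			 * (pass stays at 0 while one scan per wakeup keeps
			 * up; once a scan falls short, pass is bumped,
			 * vm_pageout_scan() lifts the maxlaunder cap, and
			 * the daemon naps only half a second between
			 * passes.)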
1458 */ 1459 if (pass > 1) 1460 pass = 1; 1461 else 1462 pass = 0; 1463 error = tsleep(&vm_pages_needed, PVM, 1464 "psleep", vm_pageout_stats_interval * hz); 1465 if (error && !vm_pages_needed) { 1466 splx(s); 1467 pass = 0; 1468 vm_pageout_page_stats(); 1469 continue; 1470 } 1471 } 1472 1473 if (vm_pages_needed) 1474 cnt.v_pdwakeups++; 1475 splx(s); 1476 vm_pageout_scan(pass); 1477 vm_pageout_deficit = 0; 1478 } 1479} 1480 1481void 1482pagedaemon_wakeup() 1483{ 1484 if (!vm_pages_needed && curthread->td_proc != pageproc) { 1485 vm_pages_needed++; 1486 wakeup(&vm_pages_needed); 1487 } 1488} 1489 1490#if !defined(NO_SWAPPING) 1491static void 1492vm_req_vmdaemon() 1493{ 1494 static int lastrun = 0; 1495 1496 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) { 1497 wakeup(&vm_daemon_needed); 1498 lastrun = ticks; 1499 } 1500} 1501 1502static void 1503vm_daemon() 1504{ 1505 struct proc *p; 1506 int breakout; 1507 struct thread *td; 1508 1509 mtx_lock(&Giant); 1510 while (TRUE) { 1511 tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0); 1512 if (vm_pageout_req_swapout) { 1513 swapout_procs(vm_pageout_req_swapout); 1514 vm_pageout_req_swapout = 0; 1515 } 1516 /* 1517 * scan the processes for exceeding their rlimits or if 1518 * process is swapped out -- deactivate pages 1519 */ 1520 sx_slock(&allproc_lock); 1521 LIST_FOREACH(p, &allproc, p_list) { 1522 vm_pindex_t limit, size; 1523 1524 /* 1525 * if this is a system process or if we have already 1526 * looked at this process, skip it. 1527 */ 1528 if (p->p_flag & (P_SYSTEM | P_WEXIT)) { 1529 continue; 1530 } 1531 /* 1532 * if the process is in a non-running type state, 1533 * don't touch it. 1534 */ 1535 mtx_lock_spin(&sched_lock); 1536 breakout = 0; 1537 FOREACH_THREAD_IN_PROC(p, td) { 1538 if (!TD_ON_RUNQ(td) && 1539 !TD_IS_RUNNING(td) && 1540 !TD_IS_SLEEPING(td)) { 1541 breakout = 1; 1542 break; 1543 } 1544 } 1545 if (breakout) { 1546 mtx_unlock_spin(&sched_lock); 1547 continue; 1548 } 1549 /* 1550 * get a limit 1551 */ 1552 limit = OFF_TO_IDX( 1553 qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 1554 p->p_rlimit[RLIMIT_RSS].rlim_max)); 1555 1556 /* 1557 * let processes that are swapped out really be 1558 * swapped out set the limit to nothing (will force a 1559 * swap-out.) 1560 */ 1561 if ((p->p_sflag & PS_INMEM) == 0) 1562 limit = 0; /* XXX */ 1563 mtx_unlock_spin(&sched_lock); 1564 1565 size = vmspace_resident_count(p->p_vmspace); 1566 if (limit >= 0 && size >= limit) { 1567 vm_pageout_map_deactivate_pages( 1568 &p->p_vmspace->vm_map, limit); 1569 } 1570 } 1571 sx_sunlock(&allproc_lock); 1572 } 1573} 1574#endif /* !defined(NO_SWAPPING) */ 1575
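
/*
 * Usage sketch: the knobs defined above are exported under the "vm"
 * sysctl tree, so they can be inspected or tuned at run time with
 * sysctl(8), e.g.:
 *
 *	sysctl vm.max_launder
 *	sysctl vm.max_launder=64
 *	sysctl vm.defer_swapspace_pageouts=1
 */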