vm_pageout.c revision 69847
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 69847 2000-12-11 07:52:47Z dillon $
 */

/*
 * The proverbial page-out daemon.
 */
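/*
 * Overview (added note, summarizing this file): vm_pageout() is the
 * pagedaemon's main loop; it calls vm_pageout_scan() to reclaim pages and
 * vm_pageout_page_stats() to keep page-activity statistics fresh when the
 * system is not under memory pressure.  When swapping support is compiled
 * in, vm_daemon() additionally enforces RSS limits and requests whole
 * process swapout.
 */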
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

static int max_page_launder=100;
static int vm_pageout_actcmp=0;
#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
SYSCTL_INT(_vm, OID_AUTO, vm_pageout_actcmp,
	CTLFLAG_RD, &vm_pageout_actcmp, 0, "pagedaemon aggressiveness");


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
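	/*
	 * Note on the mc[] layout (added for clarity): the target page is
	 * seeded at index vm_pageout_page_count, the middle of the array.
	 * The reverse scan below grows the cluster downward by decrementing
	 * page_base, and the forward scan grows it upward via pageout_count;
	 * the final flush hands vm_pageout_flush() the contiguous run
	 * starting at mc[page_base].
	 */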
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we setup for the start of
 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */

	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * the page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements, or, if vm_page_proc_limit is set, deactivate
 *	all of the pages in the object and its backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

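/*
 * Strategy note (added, summarizing the routine below): we first hunt for
 * the largest unshadowed object in the map and deactivate pages from it,
 * then walk the remaining map entries.  When the caller passes a desired
 * count of 0 (a fully swapped-out process) we also tear down all mappings
 * so the page table pages themselves can be reclaimed.
 */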
/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	};

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.  Be more aggressive if we aren't making our target.
	 */

	page_shortage = vm_paging_target() +
	    addl_page_shortage_init + vm_pageout_actcmp;

	/*
	 * Figure out how aggressively we should flush dirty pages.
	 */
	{
		int factor = vm_pageout_actcmp;

		maxlaunder = cnt.v_inactive_target / 3 + factor;
		if (maxlaunder > max_page_launder + factor)
			maxlaunder = max_page_launder + factor;
	}
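	/*
	 * Added note: maxlaunder caps the number of dirty-page cleaning
	 * operations performed in this pass (nominally a seek/I/O cost
	 * rather than a byte count).  vm_pageout_actcmp raises both the
	 * shortage and the launder limit when previous passes failed to
	 * reach the paging target.
	 */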
	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system does not know anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * Presumably we have sufficient free memory to do
			 * the more sophisticated checks and locking required
			 * for vnodes.
			 *
			 * The object is already known NOT to be dead.  The
			 * vget() may still block, though, because
			 * VOP_ISLOCKED() doesn't check to see if an inode
			 * (v_data) is associated with the vnode.  If it isn't,
			 * vget() will load it in from disk.  Worse, vget()
			 * may actually get stuck waiting on "inode" if another
			 * process is in the process of bringing the inode in.
			 * This is bad news for us either way.
			 *
			 * So for the moment we check v_data == NULL as a
			 * workaround.  This means that vnodes which do not
			 * use v_data in the way we expect probably will not
			 * wind up being paged out by the pager and it will be
			 * up to the syncer to get them.  That's better than
			 * us blocking here.
			 *
			 * This whole code section is bogus - we need to fix
			 * the vnode pager to handle vm_page_t's without us
			 * having to do any sophisticated VOP tests.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (VOP_ISLOCKED(vp, NULL) ||
				    vp->v_data == NULL ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					vn_finished_write(mp);
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget() above.  We don't move
				 * the page back onto the end of the queue so
				 * that statistics are more correct if we
				 * don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.  maxlaunder nominally
			 * counts I/O cost (seeks) rather than bytes.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0)
				--maxlaunder;
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * If we were not able to meet our target, increase actcmp
	 */

	if (vm_page_count_min()) {
		if (vm_pageout_actcmp < ACT_MAX / 2)
			vm_pageout_actcmp += ACT_ADVANCE;
	} else {
		if (vm_pageout_actcmp < ACT_DECLINE)
			vm_pageout_actcmp = 0;
		else
			vm_pageout_actcmp -= ACT_DECLINE;
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */

	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;
	page_shortage += vm_pageout_actcmp;

	/*
	 * Scan the active queue for things we can deactivate. We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) ||
			    (m->act_count <= vm_pageout_actcmp)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
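	/*
	 * Victim selection below (added note): system processes, init
	 * (pid 1), and low-numbered pids (< 48) while swap remains are all
	 * exempt, as are processes that are not currently runnable or
	 * sleeping.  Of the remainder, the process with the largest
	 * resident set is chosen and killed.
	 */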
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
		bigproc = NULL;
		bigsize = 0;
		lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_enter(&sched_lock, MTX_SPIN);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_exit(&sched_lock, MTX_SPIN);
				continue;
			}
			mtx_exit(&sched_lock, MTX_SPIN);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods when there is no paging,
 * some statistics accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have more accurate
				 * RSS stats.  We don't do this in the normal page deactivation
				 * when the system is loaded VM-wise, because the cost of
				 * the large number of page protect operations would be higher
				 * than the value of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		 return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}


/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{

	mtx_enter(&Giant, MTX_DEF);

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	curproc->p_flag |= P_BUFEXHAUST;
	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wake up waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, sleep a bit and go again
			 */
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats
			 */
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				if (vm_pageout_actcmp > 0)
					--vm_pageout_actcmp;
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan();
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	mtx_enter(&Giant, MTX_DEF);

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for those exceeding their rlimits or
		 * that are swapped out -- deactivate pages
		 */

		lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_enter(&sched_lock, MTX_SPIN);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_exit(&sched_lock, MTX_SPIN);
				continue;
			}
			mtx_exit(&sched_lock, MTX_SPIN);
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			        p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out.)
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
	}
}
#endif