vm_pageout.c revision 3766
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.19 1994/10/18 14:59:20 davidg Exp $
 */

/*
 * The proverbial page-out daemon.
73 */ 74 75#include <sys/param.h> 76#include <sys/systm.h> 77#include <sys/proc.h> 78#include <sys/resourcevar.h> 79#include <sys/malloc.h> 80 81#include <vm/vm.h> 82#include <vm/vm_page.h> 83#include <vm/vm_pageout.h> 84#include <vm/swap_pager.h> 85 86extern vm_map_t kmem_map; 87int vm_pages_needed; /* Event on which pageout daemon sleeps */ 88int vm_pagescanner; /* Event on which pagescanner sleeps */ 89int vm_pageout_free_min = 0; /* Stop pageout to wait for pagers at this free level */ 90 91int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */ 92int vm_page_pagesfreed; 93int vm_desired_cache_size; 94 95extern int npendingio; 96extern int hz; 97int vm_pageout_proc_limit; 98extern int nswiodone; 99extern int swap_pager_full; 100extern int vm_swap_size; 101extern int swap_pager_ready(); 102 103#define MAXREF 32767 104 105#define MAXSCAN 512 /* maximum number of pages to scan in active queue */ 106 /* set the "clock" hands to be (MAXSCAN * 4096) Bytes */ 107#define ACT_DECLINE 1 108#define ACT_ADVANCE 3 109#define ACT_MAX 100 110 111#define LOWATER ((2048*1024)/NBPG) 112 113#define VM_PAGEOUT_PAGE_COUNT 8 114int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT; 115int vm_pageout_req_do_stats; 116 117int vm_page_max_wired = 0; /* XXX max # of wired pages system-wide */ 118 119 120/* 121 * vm_pageout_clean: 122 * cleans a vm_page 123 */ 124int 125vm_pageout_clean(m, sync) 126 register vm_page_t m; 127 int sync; 128{ 129 /* 130 * Clean the page and remove it from the 131 * laundry. 132 * 133 * We set the busy bit to cause 134 * potential page faults on this page to 135 * block. 136 * 137 * And we set pageout-in-progress to keep 138 * the object from disappearing during 139 * pageout. This guarantees that the 140 * page won't move from the inactive 141 * queue. (However, any other page on 142 * the inactive queue may move!) 
143 */ 144 145 register vm_object_t object; 146 register vm_pager_t pager; 147 int pageout_status[VM_PAGEOUT_PAGE_COUNT]; 148 vm_page_t ms[VM_PAGEOUT_PAGE_COUNT]; 149 int pageout_count; 150 int anyok=0; 151 int i; 152 vm_offset_t offset = m->offset; 153 154 object = m->object; 155 if (!object) { 156 printf("pager: object missing\n"); 157 return 0; 158 } 159 160 /* 161 * Try to collapse the object before 162 * making a pager for it. We must 163 * unlock the page queues first. 164 * We try to defer the creation of a pager 165 * until all shadows are not paging. This 166 * allows vm_object_collapse to work better and 167 * helps control swap space size. 168 * (J. Dyson 11 Nov 93) 169 */ 170 171 if (!object->pager && 172 cnt.v_free_count < vm_pageout_free_min) 173 return 0; 174 175 if (!object->pager && 176 object->shadow && 177 object->shadow->paging_in_progress) 178 return 0; 179 180 if( !sync) { 181 if (object->shadow) { 182 vm_object_collapse(object); 183 if (!vm_page_lookup(object, offset)) 184 return 0; 185 } 186 187 if ((m->busy != 0) || 188 (m->flags & PG_BUSY) || (m->hold_count != 0)) { 189 return 0; 190 } 191 } 192 193 pageout_count = 1; 194 ms[0] = m; 195 196 pager = object->pager; 197 if (pager) { 198 for (i = 1; i < vm_pageout_page_count; i++) { 199 ms[i] = vm_page_lookup(object, offset+i*NBPG); 200 if (ms[i]) { 201 if (( ((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE) 202 || ( (ms[i]->flags & (PG_CLEAN|PG_BUSY)) == 0 && sync == VM_PAGEOUT_FORCE)) 203 && (ms[i]->wire_count == 0) 204 && (ms[i]->busy == 0) 205 && (ms[i]->hold_count == 0)) 206 pageout_count++; 207 else 208 break; 209 } else 210 break; 211 } 212 for(i=0;i<pageout_count;i++) { 213 ms[i]->flags |= PG_BUSY; 214 pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ); 215 } 216 object->paging_in_progress += pageout_count; 217 } else { 218 219 m->flags |= PG_BUSY; 220 221 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ); 222 223 object->paging_in_progress++; 224 225 pager = 
vm_pager_allocate(PG_DFLT, (caddr_t)0, 226 object->size, VM_PROT_ALL, 0); 227 if (pager != NULL) { 228 vm_object_setpager(object, pager, 0, FALSE); 229 } 230 } 231 232 /* 233 * If there is no pager for the page, 234 * use the default pager. If there's 235 * no place to put the page at the 236 * moment, leave it in the laundry and 237 * hope that there will be paging space 238 * later. 239 */ 240 241 if ((pager && pager->pg_type == PG_SWAP) || 242 cnt.v_free_count >= vm_pageout_free_min) { 243 if( pageout_count == 1) { 244 pageout_status[0] = pager ? 245 vm_pager_put(pager, m, 246 ((sync || (object == kernel_object)) ? TRUE: FALSE)) : 247 VM_PAGER_FAIL; 248 } else { 249 if( !pager) { 250 for(i=0;i<pageout_count;i++) 251 pageout_status[i] = VM_PAGER_FAIL; 252 } else { 253 vm_pager_put_pages(pager, ms, pageout_count, 254 ((sync || (object == kernel_object)) ? TRUE : FALSE), 255 pageout_status); 256 } 257 } 258 259 } else { 260 for(i=0;i<pageout_count;i++) 261 pageout_status[i] = VM_PAGER_FAIL; 262 } 263 264 for(i=0;i<pageout_count;i++) { 265 switch (pageout_status[i]) { 266 case VM_PAGER_OK: 267 ms[i]->flags &= ~PG_LAUNDRY; 268 ++anyok; 269 break; 270 case VM_PAGER_PEND: 271 ms[i]->flags &= ~PG_LAUNDRY; 272 ++anyok; 273 break; 274 case VM_PAGER_BAD: 275 /* 276 * Page outside of range of object. 277 * Right now we essentially lose the 278 * changes by pretending it worked. 279 */ 280 ms[i]->flags &= ~PG_LAUNDRY; 281 ms[i]->flags |= PG_CLEAN; 282 pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i])); 283 break; 284 case VM_PAGER_ERROR: 285 case VM_PAGER_FAIL: 286 /* 287 * If page couldn't be paged out, then 288 * reactivate the page so it doesn't 289 * clog the inactive list. (We will 290 * try paging out it again later). 291 */ 292 if (ms[i]->flags & PG_INACTIVE) 293 vm_page_activate(ms[i]); 294 break; 295 case VM_PAGER_AGAIN: 296 break; 297 } 298 299 300 /* 301 * If the operation is still going, leave 302 * the page busy to block all other accesses. 
303 * Also, leave the paging in progress 304 * indicator set so that we don't attempt an 305 * object collapse. 306 */ 307 if (pageout_status[i] != VM_PAGER_PEND) { 308 PAGE_WAKEUP(ms[i]); 309 if (--object->paging_in_progress == 0) 310 wakeup((caddr_t) object); 311 if ((ms[i]->flags & PG_REFERENCED) || 312 pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) { 313 pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i])); 314 ms[i]->flags &= ~PG_REFERENCED; 315 if( ms[i]->flags & PG_INACTIVE) 316 vm_page_activate(ms[i]); 317 } 318 } 319 } 320 return anyok; 321} 322 323/* 324 * vm_pageout_object_deactivate_pages 325 * 326 * deactivate enough pages to satisfy the inactive target 327 * requirements or if vm_page_proc_limit is set, then 328 * deactivate all of the pages in the object and its 329 * shadows. 330 * 331 * The object and map must be locked. 332 */ 333int 334vm_pageout_object_deactivate_pages(map, object, count) 335 vm_map_t map; 336 vm_object_t object; 337 int count; 338{ 339 register vm_page_t p, next; 340 int rcount; 341 int dcount; 342 343 dcount = 0; 344 if (count == 0) 345 count = 1; 346 347 if (object->shadow) { 348 int scount = count; 349 if( object->shadow->ref_count > 1) 350 scount /= object->shadow->ref_count; 351 if( scount) 352 dcount += vm_pageout_object_deactivate_pages(map, object->shadow, scount); 353 } 354 355 if (object->paging_in_progress) 356 return dcount; 357 358 /* 359 * scan the objects entire memory queue 360 */ 361 rcount = object->resident_page_count; 362 p = object->memq.tqh_first; 363 while (p && (rcount-- > 0)) { 364 next = p->listq.tqe_next; 365 cnt.v_pdpages++; 366 vm_page_lock_queues(); 367 /* 368 * if a page is active, not wired and is in the processes pmap, 369 * then deactivate the page. 
370 */ 371 if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE && 372 p->wire_count == 0 && 373 p->hold_count == 0 && 374 p->busy == 0 && 375 pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) { 376 if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) && 377 (p->flags & PG_REFERENCED) == 0) { 378 p->act_count -= min(p->act_count, ACT_DECLINE); 379 /* 380 * if the page act_count is zero -- then we deactivate 381 */ 382 if (!p->act_count) { 383 vm_page_deactivate(p); 384 pmap_page_protect(VM_PAGE_TO_PHYS(p), 385 VM_PROT_NONE); 386 /* 387 * else if on the next go-around we will deactivate the page 388 * we need to place the page on the end of the queue to age 389 * the other pages in memory. 390 */ 391 } else { 392 TAILQ_REMOVE(&vm_page_queue_active, p, pageq); 393 TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq); 394 TAILQ_REMOVE(&object->memq, p, listq); 395 TAILQ_INSERT_TAIL(&object->memq, p, listq); 396 } 397 /* 398 * see if we are done yet 399 */ 400 if (p->flags & PG_INACTIVE) { 401 --count; 402 ++dcount; 403 if (count <= 0 && 404 cnt.v_inactive_count > cnt.v_inactive_target) { 405 vm_page_unlock_queues(); 406 return dcount; 407 } 408 } 409 410 } else { 411 /* 412 * Move the page to the bottom of the queue. 413 */ 414 pmap_clear_reference(VM_PAGE_TO_PHYS(p)); 415 p->flags &= ~PG_REFERENCED; 416 if (p->act_count < ACT_MAX) 417 p->act_count += ACT_ADVANCE; 418 419 TAILQ_REMOVE(&vm_page_queue_active, p, pageq); 420 TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq); 421 TAILQ_REMOVE(&object->memq, p, listq); 422 TAILQ_INSERT_TAIL(&object->memq, p, listq); 423 } 424 } 425 426 vm_page_unlock_queues(); 427 p = next; 428 } 429 return dcount; 430} 431 432 433/* 434 * deactivate some number of pages in a map, try to do it fairly, but 435 * that is really hard to do. 
436 */ 437 438void 439vm_pageout_map_deactivate_pages(map, entry, count, freeer) 440 vm_map_t map; 441 vm_map_entry_t entry; 442 int *count; 443 int (*freeer)(vm_map_t, vm_object_t, int); 444{ 445 vm_map_t tmpm; 446 vm_map_entry_t tmpe; 447 vm_object_t obj; 448 if (*count <= 0) 449 return; 450 vm_map_reference(map); 451 if (!lock_try_read(&map->lock)) { 452 vm_map_deallocate(map); 453 return; 454 } 455 if (entry == 0) { 456 tmpe = map->header.next; 457 while (tmpe != &map->header && *count > 0) { 458 vm_pageout_map_deactivate_pages(map, tmpe, count, freeer); 459 tmpe = tmpe->next; 460 }; 461 } else if (entry->is_sub_map || entry->is_a_map) { 462 tmpm = entry->object.share_map; 463 tmpe = tmpm->header.next; 464 while (tmpe != &tmpm->header && *count > 0) { 465 vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer); 466 tmpe = tmpe->next; 467 }; 468 } else if ((obj = entry->object.vm_object) != 0) { 469 *count -= (*freeer)(map, obj, *count); 470 } 471 lock_read_done(&map->lock); 472 vm_map_deallocate(map); 473 return; 474} 475 476/* 477 * vm_pageout_scan does the dirty work for the pageout daemon. 478 */ 479int 480vm_pageout_scan() 481{ 482 vm_page_t m; 483 int page_shortage, maxscan, maxlaunder; 484 int pages_freed; 485 int desired_free; 486 vm_page_t next; 487 struct proc *p, *bigproc; 488 vm_offset_t size, bigsize; 489 vm_object_t object; 490 int force_wakeup = 0; 491 int cache_size, orig_cache_size; 492 493 /* 494 * We manage the cached memory by attempting to keep it 495 * at about the desired level. 496 * We deactivate the pages for the oldest cached objects 497 * first. This keeps pages that are "cached" from hogging 498 * physical memory. 
499 */ 500 orig_cache_size = 0; 501 object = vm_object_cached_list.tqh_first; 502 503 /* calculate the total cached size */ 504 505 while( object) { 506 orig_cache_size += object->resident_page_count; 507 object = object->cached_list.tqe_next; 508 } 509 510redeact: 511 cache_size = orig_cache_size; 512 object = vm_object_cached_list.tqh_first; 513 vm_object_cache_lock(); 514 while ( object && (cnt.v_inactive_count < cnt.v_inactive_target) && 515 (cache_size >= (vm_swap_size?vm_desired_cache_size:0))) { 516 vm_object_cache_unlock(); 517 518 /* 519 * if there are no resident pages -- get rid of the object 520 */ 521 if( object->resident_page_count == 0) { 522 if (object != vm_object_lookup(object->pager)) 523 panic("vm_object_deactivate: I'm sooo confused."); 524 pager_cache(object, FALSE); 525 goto redeact; 526 } else { 527 /* 528 * if there are resident pages -- deactivate them 529 */ 530 vm_object_deactivate_pages(object); 531 cache_size -= object->resident_page_count; 532 object = object->cached_list.tqe_next; 533 } 534 535 vm_object_cache_lock(); 536 } 537 vm_object_cache_unlock(); 538 539morefree: 540 /* 541 * now swap processes out if we are in low memory conditions 542 */ 543 if ((cnt.v_free_count <= cnt.v_free_min) && !swap_pager_full && vm_swap_size) { 544 /* 545 * swap out inactive processes 546 */ 547 swapout_threads(); 548 } 549 550 /* 551 * scan the processes for exceeding their rlimits or if process 552 * is swapped out -- deactivate pages 553 */ 554 555 for (p = (struct proc *)allproc; p != NULL; p = p->p_next) { 556 int overage; 557 quad_t limit; 558 559 /* 560 * if this is a system process or if we have already 561 * looked at this process, skip it. 562 */ 563 if (p->p_flag & (P_SYSTEM|P_WEXIT)) { 564 continue; 565 } 566 567 /* 568 * if the process is in a non-running type state, 569 * don't touch it. 
570 */ 571 if (p->p_stat != SRUN && p->p_stat != SSLEEP) { 572 continue; 573 } 574 575 /* 576 * get a limit 577 */ 578 limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 579 p->p_rlimit[RLIMIT_RSS].rlim_max); 580 581 /* 582 * let processes that are swapped out really be swapped out 583 * set the limit to nothing (will force a swap-out.) 584 */ 585 if ((p->p_flag & P_INMEM) == 0) 586 limit = 0; 587 588 size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG; 589 if (limit >= 0 && size >= limit) { 590 overage = (size - limit) / NBPG; 591 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, 592 (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages); 593 } 594 } 595 596 if (((cnt.v_free_count + cnt.v_inactive_count) >= 597 (cnt.v_inactive_target + cnt.v_free_target)) && 598 (cnt.v_free_count >= cnt.v_free_target)) 599 return force_wakeup; 600 601 pages_freed = 0; 602 desired_free = cnt.v_free_target; 603 604 /* 605 * Start scanning the inactive queue for pages we can free. 606 * We keep scanning until we have enough free pages or 607 * we have scanned through the entire queue. If we 608 * encounter dirty pages, we start cleaning them. 609 */ 610 611 maxlaunder = (cnt.v_free_target - cnt.v_free_count); 612 maxscan = cnt.v_inactive_count; 613rescan1: 614 m = vm_page_queue_inactive.tqh_first; 615 while (m && (maxscan-- > 0) && 616 (cnt.v_free_count < desired_free) ) { 617 vm_page_t next; 618 619 cnt.v_pdpages++; 620 next = m->pageq.tqe_next; 621 622 if( (m->flags & PG_INACTIVE) == 0) { 623 printf("vm_pageout_scan: page not inactive?"); 624 continue; 625 } 626 627 /* 628 * activate held pages 629 */ 630 if (m->hold_count != 0) { 631 vm_page_activate(m); 632 m = next; 633 continue; 634 } 635 636 /* 637 * dont mess with busy pages 638 */ 639 if (m->busy || (m->flags & PG_BUSY)) { 640 m = next; 641 continue; 642 } 643 644 /* 645 * NOTE: PG_CLEAN doesn't guarantee that the page is clean. 
646 */ 647 if (m->flags & PG_CLEAN) { 648 /* 649 * If we're not low on memory and the page has been reference, 650 * or if the page has been modified, then reactivate the page. 651 */ 652 if (((cnt.v_free_count > vm_pageout_free_min) && 653 (pmap_is_referenced(VM_PAGE_TO_PHYS(m)) || ((m->flags & PG_REFERENCED) != 0))) || 654 pmap_is_modified(VM_PAGE_TO_PHYS(m))) { 655 m->flags &= ~PG_REFERENCED; 656 vm_page_activate(m); 657 } else if (!m->act_count) { 658 pmap_page_protect(VM_PAGE_TO_PHYS(m), 659 VM_PROT_NONE); 660 vm_page_free(m); 661 ++cnt.v_dfree; 662 ++pages_freed; 663 } else { 664 m->act_count -= min(m->act_count, ACT_DECLINE); 665 TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq); 666 TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq); 667 } 668 } else if ((m->flags & PG_LAUNDRY) && maxlaunder > 0) { 669 int written; 670 if (pmap_is_referenced(VM_PAGE_TO_PHYS(m)) || 671 ((m->flags & PG_REFERENCED) != 0)) { 672 pmap_clear_reference(VM_PAGE_TO_PHYS(m)); 673 vm_page_activate(m); 674 m->flags &= ~PG_REFERENCED; 675 m = next; 676 continue; 677 } 678 /* 679 * If a page is dirty, then it is either 680 * being washed (but not yet cleaned) 681 * or it is still in the laundry. If it is 682 * still in the laundry, then we start the 683 * cleaning operation. 684 */ 685 686 written = vm_pageout_clean(m,0); 687 if (written) 688 maxlaunder -= written; 689 690 if (!next) 691 break; 692 /* 693 * if the next page has been re-activated, start scanning again 694 */ 695 if ((next->flags & PG_INACTIVE) == 0) 696 goto rescan1; 697 } else if ((m->flags & PG_REFERENCED) || 698 pmap_is_referenced(VM_PAGE_TO_PHYS(m))) { 699 pmap_clear_reference(VM_PAGE_TO_PHYS(m)); 700 m->flags &= ~PG_REFERENCED; 701 vm_page_activate(m); 702 } 703 m = next; 704 } 705 706 /* 707 * Compute the page shortage. If we are still very low on memory 708 * be sure that we will move a minimal amount of pages from active 709 * to inactive. 
710 */ 711 712 page_shortage = cnt.v_inactive_target - 713 (cnt.v_free_count + cnt.v_inactive_count); 714 715 if (page_shortage <= 0) { 716 if (pages_freed == 0) { 717 if( cnt.v_free_count < cnt.v_free_min) { 718 page_shortage = cnt.v_free_min - cnt.v_free_count + 1; 719 } else if(((cnt.v_free_count + cnt.v_inactive_count) < 720 (cnt.v_free_min + cnt.v_inactive_target))) { 721 page_shortage = 1; 722 } else { 723 page_shortage = 0; 724 } 725 } 726 727 } 728 729 maxscan = cnt.v_active_count; 730 m = vm_page_queue_active.tqh_first; 731 while (m && maxscan-- && (page_shortage > 0)) { 732 733 cnt.v_pdpages++; 734 next = m->pageq.tqe_next; 735 736 /* 737 * Don't deactivate pages that are busy. 738 */ 739 if ((m->busy != 0) || 740 (m->flags & PG_BUSY) || (m->hold_count != 0)) { 741 m = next; 742 continue; 743 } 744 745 if ((m->flags & PG_REFERENCED) || 746 pmap_is_referenced(VM_PAGE_TO_PHYS(m))) { 747 pmap_clear_reference(VM_PAGE_TO_PHYS(m)); 748 m->flags &= ~PG_REFERENCED; 749 if (m->act_count < ACT_MAX) 750 m->act_count += ACT_ADVANCE; 751 TAILQ_REMOVE(&vm_page_queue_active, m, pageq); 752 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq); 753 TAILQ_REMOVE(&m->object->memq, m, listq); 754 TAILQ_INSERT_TAIL(&m->object->memq, m, listq); 755 } else { 756 m->act_count -= min(m->act_count, ACT_DECLINE); 757 758 /* 759 * if the page act_count is zero -- then we deactivate 760 */ 761 if (!m->act_count) { 762 vm_page_deactivate(m); 763 --page_shortage; 764 /* 765 * else if on the next go-around we will deactivate the page 766 * we need to place the page on the end of the queue to age 767 * the other pages in memory. 
768 */ 769 } else { 770 TAILQ_REMOVE(&vm_page_queue_active, m, pageq); 771 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq); 772 TAILQ_REMOVE(&m->object->memq, m, listq); 773 TAILQ_INSERT_TAIL(&m->object->memq, m, listq); 774 } 775 } 776 m = next; 777 } 778 779 /* 780 * if we have not freed any pages and we are desparate for memory 781 * then we keep trying until we get some (any) memory. 782 */ 783 784 if (!force_wakeup && (swap_pager_full || !force_wakeup || 785 (pages_freed == 0 && (cnt.v_free_count < cnt.v_free_min)))){ 786 vm_pager_sync(); 787 force_wakeup = 1; 788 goto morefree; 789 } 790 791 /* 792 * make sure that we have swap space -- if we are low on 793 * memory and swap -- then kill the biggest process. 794 */ 795 if ((vm_swap_size == 0 || swap_pager_full) && 796 (cnt.v_free_count < cnt.v_free_min)) { 797 bigproc = NULL; 798 bigsize = 0; 799 for (p = (struct proc *)allproc; p != NULL; p = p->p_next) { 800 /* 801 * if this is a system process, skip it 802 */ 803 if ((p->p_flag & P_SYSTEM) || ((p->p_pid < 48) && (vm_swap_size != 0))) { 804 continue; 805 } 806 807 /* 808 * if the process is in a non-running type state, 809 * don't touch it. 810 */ 811 if (p->p_stat != SRUN && p->p_stat != SSLEEP) { 812 continue; 813 } 814 /* 815 * get the process size 816 */ 817 size = p->p_vmspace->vm_pmap.pm_stats.resident_count; 818 /* 819 * if the this process is bigger than the biggest one 820 * remember it. 821 */ 822 if (size > bigsize) { 823 bigproc = p; 824 bigsize = size; 825 } 826 } 827 if (bigproc != NULL) { 828 printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long)bigproc->p_pid); 829 psignal(bigproc, SIGKILL); 830 bigproc->p_estcpu = 0; 831 bigproc->p_nice = PRIO_MIN; 832 resetpriority(bigproc); 833 wakeup( (caddr_t) &cnt.v_free_count); 834 } 835 } 836 vm_page_pagesfreed += pages_freed; 837 return force_wakeup; 838} 839 840/* 841 * vm_pageout is the high level pageout daemon. 
842 */ 843void 844vm_pageout() 845{ 846 (void) spl0(); 847 848 /* 849 * Initialize some paging parameters. 850 */ 851 852 cnt.v_free_min = 12; 853 /* 854 * free_reserved needs to include enough for the largest 855 * swap pager structures plus enough for any pv_entry 856 * structs when paging. 857 */ 858 cnt.v_free_reserved = 4 + cnt.v_page_count / 1024; 859 if (cnt.v_free_min < 8) 860 cnt.v_free_min = 8; 861 if (cnt.v_free_min > 32) 862 cnt.v_free_min = 32; 863 vm_pageout_free_min = cnt.v_free_reserved; 864 cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved; 865 cnt.v_inactive_target = cnt.v_free_count / 12; 866 cnt.v_free_min += cnt.v_free_reserved; 867 vm_desired_cache_size = cnt.v_page_count / 3; 868 869 /* XXX does not really belong here */ 870 if (vm_page_max_wired == 0) 871 vm_page_max_wired = cnt.v_free_count / 3; 872 873 874 (void) swap_pager_alloc(0, 0, 0, 0); 875 876 /* 877 * The pageout daemon is never done, so loop 878 * forever. 879 */ 880 while (TRUE) { 881 int force_wakeup; 882/* 883 cnt.v_free_min = 12 + averunnable.ldavg[0] / 1024; 884 cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved; 885 cnt.v_inactive_target = cnt.v_free_target*2; 886*/ 887 888 tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0); 889 cnt.v_pdwakeups++; 890 891 vm_pager_sync(); 892 /* 893 * The force wakeup hack added to eliminate delays and potiential 894 * deadlock. It was possible for the page daemon to indefintely 895 * postpone waking up a process that it might be waiting for memory 896 * on. The putmulti stuff seems to have aggravated the situation. 897 */ 898 force_wakeup = vm_pageout_scan(); 899 vm_pager_sync(); 900 if( force_wakeup) 901 wakeup( (caddr_t) &cnt.v_free_count); 902 wakeup((caddr_t) kmem_map); 903 } 904} 905 906