/* vm_pageout.c revision 3815 */
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 41 * 42 * 43 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 44 * All rights reserved. 45 * 46 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 47 * 48 * Permission to use, copy, modify and distribute this software and 49 * its documentation is hereby granted, provided that both the copyright 50 * notice and this permission notice appear in all copies of the 51 * software, derivative works or modified versions, and any portions 52 * thereof, and that both notices appear in supporting documentation. 53 * 54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 57 * 58 * Carnegie Mellon requests users of this software to return to 59 * 60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 61 * School of Computer Science 62 * Carnegie Mellon University 63 * Pittsburgh PA 15213-3890 64 * 65 * any improvements or extensions that they make and grant Carnegie the 66 * rights to redistribute these changes. 67 * 68 * $Id: vm_pageout.c,v 1.21 1994/10/23 20:53:33 davidg Exp $ 69 */ 70 71/* 72 * The proverbial page-out daemon. 
73 */ 74 75#include <sys/param.h> 76#include <sys/systm.h> 77#include <sys/proc.h> 78#include <sys/resourcevar.h> 79#include <sys/malloc.h> 80 81#include <vm/vm.h> 82#include <vm/vm_page.h> 83#include <vm/vm_pageout.h> 84#include <vm/swap_pager.h> 85 86extern vm_map_t kmem_map; 87int vm_pages_needed; /* Event on which pageout daemon sleeps */ 88int vm_pagescanner; /* Event on which pagescanner sleeps */ 89int vm_pageout_free_min = 0; /* Stop pageout to wait for pagers at this free level */ 90 91int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */ 92int vm_page_pagesfreed; 93int vm_desired_cache_size; 94 95extern int npendingio; 96extern int hz; 97int vm_pageout_proc_limit; 98extern int nswiodone; 99extern int swap_pager_full; 100extern int vm_swap_size; 101extern int swap_pager_ready(); 102 103#define MAXREF 32767 104 105#define MAXSCAN 512 /* maximum number of pages to scan in active queue */ 106 /* set the "clock" hands to be (MAXSCAN * 4096) Bytes */ 107#define ACT_DECLINE 1 108#define ACT_ADVANCE 3 109#define ACT_MAX 100 110 111#define LOWATER ((2048*1024)/NBPG) 112 113#define VM_PAGEOUT_PAGE_COUNT 8 114int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT; 115int vm_pageout_req_do_stats; 116 117int vm_page_max_wired = 0; /* XXX max # of wired pages system-wide */ 118 119 120/* 121 * vm_pageout_clean: 122 * cleans a vm_page 123 */ 124int 125vm_pageout_clean(m, sync) 126 register vm_page_t m; 127 int sync; 128{ 129 /* 130 * Clean the page and remove it from the 131 * laundry. 132 * 133 * We set the busy bit to cause 134 * potential page faults on this page to 135 * block. 136 * 137 * And we set pageout-in-progress to keep 138 * the object from disappearing during 139 * pageout. This guarantees that the 140 * page won't move from the inactive 141 * queue. (However, any other page on 142 * the inactive queue may move!) 
143 */ 144 145 register vm_object_t object; 146 register vm_pager_t pager; 147 int pageout_status[VM_PAGEOUT_PAGE_COUNT]; 148 vm_page_t ms[VM_PAGEOUT_PAGE_COUNT]; 149 int pageout_count; 150 int anyok=0; 151 int i; 152 vm_offset_t offset = m->offset; 153 154 object = m->object; 155 if (!object) { 156 printf("pager: object missing\n"); 157 return 0; 158 } 159 160 /* 161 * Try to collapse the object before 162 * making a pager for it. We must 163 * unlock the page queues first. 164 * We try to defer the creation of a pager 165 * until all shadows are not paging. This 166 * allows vm_object_collapse to work better and 167 * helps control swap space size. 168 * (J. Dyson 11 Nov 93) 169 */ 170 171 if (!object->pager && 172 cnt.v_free_count < vm_pageout_free_min) 173 return 0; 174 175 if (!object->pager && 176 object->shadow && 177 object->shadow->paging_in_progress) 178 return 0; 179 180 if( !sync) { 181 if (object->shadow) { 182 vm_object_collapse(object); 183 if (!vm_page_lookup(object, offset)) 184 return 0; 185 } 186 187 if ((m->busy != 0) || 188 (m->flags & PG_BUSY) || (m->hold_count != 0)) { 189 return 0; 190 } 191 } 192 193 pageout_count = 1; 194 ms[0] = m; 195 196 pager = object->pager; 197 if (pager) { 198 for (i = 1; i < vm_pageout_page_count; i++) { 199 ms[i] = vm_page_lookup(object, offset+i*NBPG); 200 if (ms[i]) { 201 if (( ((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE) 202 || ( (ms[i]->flags & (PG_CLEAN|PG_BUSY)) == 0 && sync == VM_PAGEOUT_FORCE)) 203 && (ms[i]->wire_count == 0) 204 && (ms[i]->busy == 0) 205 && (ms[i]->hold_count == 0)) 206 pageout_count++; 207 else 208 break; 209 } else 210 break; 211 } 212 for(i=0;i<pageout_count;i++) { 213 ms[i]->flags |= PG_BUSY; 214 pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ); 215 } 216 object->paging_in_progress += pageout_count; 217 } else { 218 219 m->flags |= PG_BUSY; 220 221 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ); 222 223 object->paging_in_progress++; 224 225 pager = 
vm_pager_allocate(PG_DFLT, (caddr_t)0, 226 object->size, VM_PROT_ALL, 0); 227 if (pager != NULL) { 228 vm_object_setpager(object, pager, 0, FALSE); 229 } 230 } 231 232 /* 233 * If there is no pager for the page, 234 * use the default pager. If there's 235 * no place to put the page at the 236 * moment, leave it in the laundry and 237 * hope that there will be paging space 238 * later. 239 */ 240 241 if ((pager && pager->pg_type == PG_SWAP) || 242 cnt.v_free_count >= vm_pageout_free_min) { 243 if( pageout_count == 1) { 244 pageout_status[0] = pager ? 245 vm_pager_put(pager, m, 246 ((sync || (object == kernel_object)) ? TRUE: FALSE)) : 247 VM_PAGER_FAIL; 248 } else { 249 if( !pager) { 250 for(i=0;i<pageout_count;i++) 251 pageout_status[i] = VM_PAGER_FAIL; 252 } else { 253 vm_pager_put_pages(pager, ms, pageout_count, 254 ((sync || (object == kernel_object)) ? TRUE : FALSE), 255 pageout_status); 256 } 257 } 258 259 } else { 260 for(i=0;i<pageout_count;i++) 261 pageout_status[i] = VM_PAGER_FAIL; 262 } 263 264 for(i=0;i<pageout_count;i++) { 265 switch (pageout_status[i]) { 266 case VM_PAGER_OK: 267 ms[i]->flags &= ~PG_LAUNDRY; 268 ++anyok; 269 break; 270 case VM_PAGER_PEND: 271 ms[i]->flags &= ~PG_LAUNDRY; 272 ++anyok; 273 break; 274 case VM_PAGER_BAD: 275 /* 276 * Page outside of range of object. 277 * Right now we essentially lose the 278 * changes by pretending it worked. 279 */ 280 ms[i]->flags &= ~PG_LAUNDRY; 281 ms[i]->flags |= PG_CLEAN; 282 pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i])); 283 break; 284 case VM_PAGER_ERROR: 285 case VM_PAGER_FAIL: 286 /* 287 * If page couldn't be paged out, then 288 * reactivate the page so it doesn't 289 * clog the inactive list. (We will 290 * try paging out it again later). 291 */ 292 if (ms[i]->flags & PG_INACTIVE) 293 vm_page_activate(ms[i]); 294 break; 295 case VM_PAGER_AGAIN: 296 break; 297 } 298 299 300 /* 301 * If the operation is still going, leave 302 * the page busy to block all other accesses. 
303 * Also, leave the paging in progress 304 * indicator set so that we don't attempt an 305 * object collapse. 306 */ 307 if (pageout_status[i] != VM_PAGER_PEND) { 308 PAGE_WAKEUP(ms[i]); 309 if (--object->paging_in_progress == 0) 310 wakeup((caddr_t) object); 311 if ((ms[i]->flags & PG_REFERENCED) || 312 pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) { 313 pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i])); 314 ms[i]->flags &= ~PG_REFERENCED; 315 if( ms[i]->flags & PG_INACTIVE) 316 vm_page_activate(ms[i]); 317 } 318 } 319 } 320 return anyok; 321} 322 323/* 324 * vm_pageout_object_deactivate_pages 325 * 326 * deactivate enough pages to satisfy the inactive target 327 * requirements or if vm_page_proc_limit is set, then 328 * deactivate all of the pages in the object and its 329 * shadows. 330 * 331 * The object and map must be locked. 332 */ 333int 334vm_pageout_object_deactivate_pages(map, object, count) 335 vm_map_t map; 336 vm_object_t object; 337 int count; 338{ 339 register vm_page_t p, next; 340 int rcount; 341 int dcount; 342 343 dcount = 0; 344 if (count == 0) 345 count = 1; 346 347 if (object->shadow) { 348 int scount = count; 349 if( object->shadow->ref_count > 1) 350 scount /= object->shadow->ref_count; 351 if( scount) 352 dcount += vm_pageout_object_deactivate_pages(map, object->shadow, scount); 353 } 354 355 if (object->paging_in_progress) 356 return dcount; 357 358 /* 359 * scan the objects entire memory queue 360 */ 361 rcount = object->resident_page_count; 362 p = object->memq.tqh_first; 363 while (p && (rcount-- > 0)) { 364 next = p->listq.tqe_next; 365 cnt.v_pdpages++; 366 vm_page_lock_queues(); 367 /* 368 * if a page is active, not wired and is in the processes pmap, 369 * then deactivate the page. 
370 */ 371 if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE && 372 p->wire_count == 0 && 373 p->hold_count == 0 && 374 p->busy == 0 && 375 pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) { 376 if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) && 377 (p->flags & PG_REFERENCED) == 0) { 378 p->act_count -= min(p->act_count, ACT_DECLINE); 379 /* 380 * if the page act_count is zero -- then we deactivate 381 */ 382 if (!p->act_count) { 383 vm_page_deactivate(p); 384 pmap_page_protect(VM_PAGE_TO_PHYS(p), 385 VM_PROT_NONE); 386 /* 387 * else if on the next go-around we will deactivate the page 388 * we need to place the page on the end of the queue to age 389 * the other pages in memory. 390 */ 391 } else { 392 TAILQ_REMOVE(&vm_page_queue_active, p, pageq); 393 TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq); 394 TAILQ_REMOVE(&object->memq, p, listq); 395 TAILQ_INSERT_TAIL(&object->memq, p, listq); 396 } 397 /* 398 * see if we are done yet 399 */ 400 if (p->flags & PG_INACTIVE) { 401 --count; 402 ++dcount; 403 if (count <= 0 && 404 cnt.v_inactive_count > cnt.v_inactive_target) { 405 vm_page_unlock_queues(); 406 return dcount; 407 } 408 } 409 410 } else { 411 /* 412 * Move the page to the bottom of the queue. 413 */ 414 pmap_clear_reference(VM_PAGE_TO_PHYS(p)); 415 p->flags &= ~PG_REFERENCED; 416 if (p->act_count < ACT_MAX) 417 p->act_count += ACT_ADVANCE; 418 419 TAILQ_REMOVE(&vm_page_queue_active, p, pageq); 420 TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq); 421 TAILQ_REMOVE(&object->memq, p, listq); 422 TAILQ_INSERT_TAIL(&object->memq, p, listq); 423 } 424 } 425 426 vm_page_unlock_queues(); 427 p = next; 428 } 429 return dcount; 430} 431 432 433/* 434 * deactivate some number of pages in a map, try to do it fairly, but 435 * that is really hard to do. 
436 */ 437 438void 439vm_pageout_map_deactivate_pages(map, entry, count, freeer) 440 vm_map_t map; 441 vm_map_entry_t entry; 442 int *count; 443 int (*freeer)(vm_map_t, vm_object_t, int); 444{ 445 vm_map_t tmpm; 446 vm_map_entry_t tmpe; 447 vm_object_t obj; 448 if (*count <= 0) 449 return; 450 vm_map_reference(map); 451 if (!lock_try_read(&map->lock)) { 452 vm_map_deallocate(map); 453 return; 454 } 455 if (entry == 0) { 456 tmpe = map->header.next; 457 while (tmpe != &map->header && *count > 0) { 458 vm_pageout_map_deactivate_pages(map, tmpe, count, freeer); 459 tmpe = tmpe->next; 460 }; 461 } else if (entry->is_sub_map || entry->is_a_map) { 462 tmpm = entry->object.share_map; 463 tmpe = tmpm->header.next; 464 while (tmpe != &tmpm->header && *count > 0) { 465 vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer); 466 tmpe = tmpe->next; 467 }; 468 } else if ((obj = entry->object.vm_object) != 0) { 469 *count -= (*freeer)(map, obj, *count); 470 } 471 lock_read_done(&map->lock); 472 vm_map_deallocate(map); 473 return; 474} 475 476/* 477 * vm_pageout_scan does the dirty work for the pageout daemon. 478 */ 479int 480vm_pageout_scan() 481{ 482 vm_page_t m; 483 int page_shortage, maxscan, maxlaunder; 484 int pages_freed; 485 int desired_free; 486 vm_page_t next; 487 struct proc *p, *bigproc; 488 vm_offset_t size, bigsize; 489 vm_object_t object; 490 int force_wakeup = 0; 491 int cache_size, orig_cache_size; 492 493 /* 494 * We manage the cached memory by attempting to keep it 495 * at about the desired level. 496 * We deactivate the pages for the oldest cached objects 497 * first. This keeps pages that are "cached" from hogging 498 * physical memory. 
499 */ 500 orig_cache_size = 0; 501 object = vm_object_cached_list.tqh_first; 502 503 /* calculate the total cached size */ 504 505 while( object) { 506 orig_cache_size += object->resident_page_count; 507 object = object->cached_list.tqe_next; 508 } 509 510redeact: 511 cache_size = orig_cache_size; 512 object = vm_object_cached_list.tqh_first; 513 vm_object_cache_lock(); 514 while ( object && (cnt.v_inactive_count < cnt.v_inactive_target)) { 515 vm_object_cache_unlock(); 516 /* 517 * if there are no resident pages -- get rid of the object 518 */ 519 if( object->resident_page_count == 0) { 520 if (object != vm_object_lookup(object->pager)) 521 panic("vm_pageout_scan: I'm sooo confused."); 522 pager_cache(object, FALSE); 523 goto redeact; 524 } else if( cache_size >= (vm_swap_size?vm_desired_cache_size:0)) { 525 /* 526 * if there are resident pages -- deactivate them 527 */ 528 vm_object_deactivate_pages(object); 529 cache_size -= object->resident_page_count; 530 } 531 object = object->cached_list.tqe_next; 532 533 vm_object_cache_lock(); 534 } 535 vm_object_cache_unlock(); 536 537morefree: 538 /* 539 * now swap processes out if we are in low memory conditions 540 */ 541 if ((cnt.v_free_count <= cnt.v_free_min) && !swap_pager_full && vm_swap_size) { 542 /* 543 * swap out inactive processes 544 */ 545 swapout_threads(); 546 } 547 548 /* 549 * scan the processes for exceeding their rlimits or if process 550 * is swapped out -- deactivate pages 551 */ 552 553 for (p = (struct proc *)allproc; p != NULL; p = p->p_next) { 554 int overage; 555 quad_t limit; 556 557 /* 558 * if this is a system process or if we have already 559 * looked at this process, skip it. 560 */ 561 if (p->p_flag & (P_SYSTEM|P_WEXIT)) { 562 continue; 563 } 564 565 /* 566 * if the process is in a non-running type state, 567 * don't touch it. 
568 */ 569 if (p->p_stat != SRUN && p->p_stat != SSLEEP) { 570 continue; 571 } 572 573 /* 574 * get a limit 575 */ 576 limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 577 p->p_rlimit[RLIMIT_RSS].rlim_max); 578 579 /* 580 * let processes that are swapped out really be swapped out 581 * set the limit to nothing (will force a swap-out.) 582 */ 583 if ((p->p_flag & P_INMEM) == 0) 584 limit = 0; 585 586 size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG; 587 if (limit >= 0 && size >= limit) { 588 overage = (size - limit) / NBPG; 589 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, 590 (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages); 591 } 592 } 593 594 if (((cnt.v_free_count + cnt.v_inactive_count) >= 595 (cnt.v_inactive_target + cnt.v_free_target)) && 596 (cnt.v_free_count >= cnt.v_free_target)) 597 return force_wakeup; 598 599 pages_freed = 0; 600 desired_free = cnt.v_free_target; 601 602 /* 603 * Start scanning the inactive queue for pages we can free. 604 * We keep scanning until we have enough free pages or 605 * we have scanned through the entire queue. If we 606 * encounter dirty pages, we start cleaning them. 607 */ 608 609 maxlaunder = (cnt.v_free_target - cnt.v_free_count); 610 maxscan = cnt.v_inactive_count; 611rescan1: 612 m = vm_page_queue_inactive.tqh_first; 613 while (m && (maxscan-- > 0) && 614 (cnt.v_free_count < desired_free) ) { 615 vm_page_t next; 616 617 cnt.v_pdpages++; 618 next = m->pageq.tqe_next; 619 620 if( (m->flags & PG_INACTIVE) == 0) { 621 printf("vm_pageout_scan: page not inactive?"); 622 continue; 623 } 624 625 /* 626 * activate held pages 627 */ 628 if (m->hold_count != 0) { 629 vm_page_activate(m); 630 m = next; 631 continue; 632 } 633 634 /* 635 * dont mess with busy pages 636 */ 637 if (m->busy || (m->flags & PG_BUSY)) { 638 m = next; 639 continue; 640 } 641 642 /* 643 * NOTE: PG_CLEAN doesn't guarantee that the page is clean. 
644 */ 645 if (m->flags & PG_CLEAN) { 646 /* 647 * If we're not low on memory and the page has been reference, 648 * or if the page has been modified, then reactivate the page. 649 */ 650 if (((cnt.v_free_count > vm_pageout_free_min) && 651 (pmap_is_referenced(VM_PAGE_TO_PHYS(m)) || ((m->flags & PG_REFERENCED) != 0))) || 652 pmap_is_modified(VM_PAGE_TO_PHYS(m))) { 653 m->flags &= ~PG_REFERENCED; 654 vm_page_activate(m); 655 } else if (!m->act_count) { 656 pmap_page_protect(VM_PAGE_TO_PHYS(m), 657 VM_PROT_NONE); 658 vm_page_free(m); 659 ++cnt.v_dfree; 660 ++pages_freed; 661 } else { 662 m->act_count -= min(m->act_count, ACT_DECLINE); 663 TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq); 664 TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq); 665 } 666 } else if ((m->flags & PG_LAUNDRY) && maxlaunder > 0) { 667 int written; 668 if (pmap_is_referenced(VM_PAGE_TO_PHYS(m)) || 669 ((m->flags & PG_REFERENCED) != 0)) { 670 pmap_clear_reference(VM_PAGE_TO_PHYS(m)); 671 vm_page_activate(m); 672 m->flags &= ~PG_REFERENCED; 673 m = next; 674 continue; 675 } 676 /* 677 * If a page is dirty, then it is either 678 * being washed (but not yet cleaned) 679 * or it is still in the laundry. If it is 680 * still in the laundry, then we start the 681 * cleaning operation. 682 */ 683 684 written = vm_pageout_clean(m,0); 685 if (written) 686 maxlaunder -= written; 687 688 if (!next) 689 break; 690 /* 691 * if the next page has been re-activated, start scanning again 692 */ 693 if ((next->flags & PG_INACTIVE) == 0) 694 goto rescan1; 695 } else if ((m->flags & PG_REFERENCED) || 696 pmap_is_referenced(VM_PAGE_TO_PHYS(m))) { 697 pmap_clear_reference(VM_PAGE_TO_PHYS(m)); 698 m->flags &= ~PG_REFERENCED; 699 vm_page_activate(m); 700 } 701 m = next; 702 } 703 704 /* 705 * Compute the page shortage. If we are still very low on memory 706 * be sure that we will move a minimal amount of pages from active 707 * to inactive. 
708 */ 709 710 page_shortage = cnt.v_inactive_target - 711 (cnt.v_free_count + cnt.v_inactive_count); 712 713 if (page_shortage <= 0) { 714 if (pages_freed == 0) { 715 if( cnt.v_free_count < cnt.v_free_min) { 716 page_shortage = cnt.v_free_min - cnt.v_free_count + 1; 717 } else if(((cnt.v_free_count + cnt.v_inactive_count) < 718 (cnt.v_free_min + cnt.v_inactive_target))) { 719 page_shortage = 1; 720 } else { 721 page_shortage = 0; 722 } 723 } 724 725 } 726 727 maxscan = cnt.v_active_count; 728 m = vm_page_queue_active.tqh_first; 729 while (m && maxscan-- && (page_shortage > 0)) { 730 731 cnt.v_pdpages++; 732 next = m->pageq.tqe_next; 733 734 /* 735 * Don't deactivate pages that are busy. 736 */ 737 if ((m->busy != 0) || 738 (m->flags & PG_BUSY) || (m->hold_count != 0)) { 739 m = next; 740 continue; 741 } 742 743 if ((m->flags & PG_REFERENCED) || 744 pmap_is_referenced(VM_PAGE_TO_PHYS(m))) { 745 pmap_clear_reference(VM_PAGE_TO_PHYS(m)); 746 m->flags &= ~PG_REFERENCED; 747 if (m->act_count < ACT_MAX) 748 m->act_count += ACT_ADVANCE; 749 TAILQ_REMOVE(&vm_page_queue_active, m, pageq); 750 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq); 751 TAILQ_REMOVE(&m->object->memq, m, listq); 752 TAILQ_INSERT_TAIL(&m->object->memq, m, listq); 753 } else { 754 m->act_count -= min(m->act_count, ACT_DECLINE); 755 756 /* 757 * if the page act_count is zero -- then we deactivate 758 */ 759 if (!m->act_count) { 760 vm_page_deactivate(m); 761 --page_shortage; 762 /* 763 * else if on the next go-around we will deactivate the page 764 * we need to place the page on the end of the queue to age 765 * the other pages in memory. 
766 */ 767 } else { 768 TAILQ_REMOVE(&vm_page_queue_active, m, pageq); 769 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq); 770 TAILQ_REMOVE(&m->object->memq, m, listq); 771 TAILQ_INSERT_TAIL(&m->object->memq, m, listq); 772 } 773 } 774 m = next; 775 } 776 777 /* 778 * if we have not freed any pages and we are desparate for memory 779 * then we keep trying until we get some (any) memory. 780 */ 781 782 if (!force_wakeup && (swap_pager_full || !force_wakeup || 783 (pages_freed == 0 && (cnt.v_free_count < cnt.v_free_min)))){ 784 vm_pager_sync(); 785 force_wakeup = 1; 786 goto morefree; 787 } 788 789 /* 790 * make sure that we have swap space -- if we are low on 791 * memory and swap -- then kill the biggest process. 792 */ 793 if ((vm_swap_size == 0 || swap_pager_full) && 794 (cnt.v_free_count < cnt.v_free_min)) { 795 bigproc = NULL; 796 bigsize = 0; 797 for (p = (struct proc *)allproc; p != NULL; p = p->p_next) { 798 /* 799 * if this is a system process, skip it 800 */ 801 if ((p->p_flag & P_SYSTEM) || ((p->p_pid < 48) && (vm_swap_size != 0))) { 802 continue; 803 } 804 805 /* 806 * if the process is in a non-running type state, 807 * don't touch it. 808 */ 809 if (p->p_stat != SRUN && p->p_stat != SSLEEP) { 810 continue; 811 } 812 /* 813 * get the process size 814 */ 815 size = p->p_vmspace->vm_pmap.pm_stats.resident_count; 816 /* 817 * if the this process is bigger than the biggest one 818 * remember it. 819 */ 820 if (size > bigsize) { 821 bigproc = p; 822 bigsize = size; 823 } 824 } 825 if (bigproc != NULL) { 826 printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long)bigproc->p_pid); 827 psignal(bigproc, SIGKILL); 828 bigproc->p_estcpu = 0; 829 bigproc->p_nice = PRIO_MIN; 830 resetpriority(bigproc); 831 wakeup( (caddr_t) &cnt.v_free_count); 832 } 833 } 834 vm_page_pagesfreed += pages_freed; 835 return force_wakeup; 836} 837 838/* 839 * vm_pageout is the high level pageout daemon. 
840 */ 841void 842vm_pageout() 843{ 844 (void) spl0(); 845 846 /* 847 * Initialize some paging parameters. 848 */ 849 850 cnt.v_free_min = 12; 851 /* 852 * free_reserved needs to include enough for the largest 853 * swap pager structures plus enough for any pv_entry 854 * structs when paging. 855 */ 856 vm_pageout_free_min = 4 + cnt.v_page_count / 1024; 857 cnt.v_free_reserved = vm_pageout_free_min + 2; 858 if (cnt.v_free_min < 8) 859 cnt.v_free_min = 8; 860 if (cnt.v_free_min > 32) 861 cnt.v_free_min = 32; 862 cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved; 863 cnt.v_inactive_target = cnt.v_free_count / 12; 864 cnt.v_free_min += cnt.v_free_reserved; 865 vm_desired_cache_size = cnt.v_page_count / 3; 866 867 /* XXX does not really belong here */ 868 if (vm_page_max_wired == 0) 869 vm_page_max_wired = cnt.v_free_count / 3; 870 871 872 (void) swap_pager_alloc(0, 0, 0, 0); 873 874 /* 875 * The pageout daemon is never done, so loop 876 * forever. 877 */ 878 while (TRUE) { 879 int force_wakeup; 880/* 881 cnt.v_free_min = 12 + averunnable.ldavg[0] / 1024; 882 cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved; 883 cnt.v_inactive_target = cnt.v_free_target*2; 884*/ 885 886 tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0); 887 cnt.v_pdwakeups++; 888 889 vm_pager_sync(); 890 /* 891 * The force wakeup hack added to eliminate delays and potiential 892 * deadlock. It was possible for the page daemon to indefintely 893 * postpone waking up a process that it might be waiting for memory 894 * on. The putmulti stuff seems to have aggravated the situation. 895 */ 896 force_wakeup = vm_pageout_scan(); 897 vm_pager_sync(); 898 if( force_wakeup) 899 wakeup( (caddr_t) &cnt.v_free_count); 900 wakeup((caddr_t) kmem_map); 901 } 902} 903 904