vm_pageout.c revision 2688
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.10 1994/09/06 11:28:46 davidg Exp $
 */

/*
 * The proverbial page-out daemon.
 */
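
/*
 * Overview of this file:
 *
 *	vm_pageout_clean()			cleans (pages out) one page,
 *						clustering up to
 *						vm_pageout_page_count
 *						neighboring pages with it
 *	vm_pageout_object_deactivate_pages()	deactivates pages in an object
 *						and its shadow chain
 *	vm_pageout_map_deactivate_pages()	walks a map (and sub-maps),
 *						applying a deactivation routine
 *	vm_pageout_scan()			the core scan over the cached,
 *						inactive and active queues
 *	vm_pageout()				the daemon itself: initializes
 *						the paging thresholds, then
 *						sleeps until woken to scan
 */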

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

extern vm_map_t kmem_map;
int	vm_pages_needed;	/* Event on which pageout daemon sleeps */
int	vm_pagescanner;		/* Event on which pagescanner sleeps */
int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */

int	vm_pageout_pages_needed = 0;	/* flag saying that the pageout daemon needs pages */
int	vm_page_pagesfreed;
int	vm_desired_cache_size;

extern int npendingio;
extern int hz;
int	vm_pageout_proc_limit;
extern int nswiodone;
extern int swap_pager_full;
extern int swap_pager_ready();

#define MAXREF 32767

#define MAXSCAN 512	/* maximum number of pages to scan in active queue */
			/* set the "clock" hands to be (MAXSCAN * 4096) Bytes */
#define ACT_DECLINE	1
#define ACT_ADVANCE	3
#define ACT_MAX		100

#define LOWATER ((2048*1024)/NBPG)

#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
static vm_offset_t vm_space_needed;
int vm_pageout_req_do_stats;

int	vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */
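
/*
 * Page aging works on the act_count field of each page.  A scan that
 * finds a page referenced credits it with ACT_ADVANCE (saturating near
 * ACT_MAX); a scan that finds it unreferenced debits ACT_DECLINE, and
 * the page is deactivated once act_count reaches zero:
 *
 *	if (referenced) {
 *		if (act_count < ACT_MAX)
 *			act_count += ACT_ADVANCE;
 *	} else
 *		act_count -= min(act_count, ACT_DECLINE);
 *
 * With ACT_ADVANCE == 3 and ACT_DECLINE == 1, one referenced scan buys
 * a page three unreferenced scans before it becomes a deactivation
 * candidate, and a persistently referenced page takes roughly ACT_MAX
 * (100) unreferenced scans to age out once it finally goes idle.
 */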

/*
 * vm_pageout_clean:
 *	cleans a vm_page
 */
int
vm_pageout_clean(m, sync)
	register vm_page_t m;
	int sync;
{
	/*
	 *	Clean the page and remove it from the
	 *	laundry.
	 *
	 *	We set the busy bit to cause
	 *	potential page faults on this page to
	 *	block.
	 *
	 *	And we set pageout-in-progress to keep
	 *	the object from disappearing during
	 *	pageout.  This guarantees that the
	 *	page won't move from the inactive
	 *	queue.  (However, any other page on
	 *	the inactive queue may move!)
	 */

	register vm_object_t object;
	register vm_pager_t pager;
	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
	vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int anyok = 0;
	int i;
	vm_offset_t offset = m->offset;

	object = m->object;
	if (!object) {
		printf("pager: object missing\n");
		return 0;
	}

	/*
	 *	Try to collapse the object before
	 *	making a pager for it.  We must
	 *	unlock the page queues first.
	 *	We try to defer the creation of a pager
	 *	until all shadows are not paging.  This
	 *	allows vm_object_collapse to work better and
	 *	helps control swap space size.
	 *	(J. Dyson 11 Nov 93)
	 */

	if (!object->pager &&
	    cnt.v_free_count < vm_pageout_free_min)
		return 0;

	if (!object->pager &&
	    object->shadow &&
	    object->shadow->paging_in_progress)
		return 0;

	if (!sync) {
		if (object->shadow) {
			vm_object_collapse(object);
			if (!vm_page_lookup(object, offset))
				return 0;
		}

		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) || (m->hold_count != 0)) {
			return 0;
		}
	}

	pageout_count = 1;
	ms[0] = m;

	if ((pager = object->pager) != NULL) {
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((ms[i] = vm_page_lookup(object, offset + i * NBPG)) != NULL) {
				if ((((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE)
				    || ((ms[i]->flags & (PG_CLEAN|PG_BUSY)) == 0 && sync == VM_PAGEOUT_FORCE))
				    && (ms[i]->wire_count == 0)
				    && (ms[i]->busy == 0)
				    && (ms[i]->hold_count == 0))
					pageout_count++;
				else
					break;
			} else
				break;
		}
		for (i = 0; i < pageout_count; i++) {
			ms[i]->flags |= PG_BUSY;
			pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
		}
		object->paging_in_progress += pageout_count;
		cnt.v_pageouts++;
		cnt.v_pgpgout += pageout_count;
	} else {

		m->flags |= PG_BUSY;

		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);

		cnt.v_pageouts++;
		cnt.v_pgpgout++;

		object->paging_in_progress++;

		pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
			object->size, VM_PROT_ALL, 0);
		if (pager != NULL) {
			vm_object_setpager(object, pager, 0, FALSE);
		}
	}

	/*
	 *	If there is no pager for the page,
	 *	use the default pager.  If there's
	 *	no place to put the page at the
	 *	moment, leave it in the laundry and
	 *	hope that there will be paging space
	 *	later.
	 */

	if ((pager && pager->pg_type == PG_SWAP) ||
	    cnt.v_free_count >= vm_pageout_free_min) {
		if (pageout_count == 1) {
			pageout_status[0] = pager ?
				vm_pager_put(pager, m,
				    ((sync || (object == kernel_object)) ? TRUE : FALSE)) :
				VM_PAGER_FAIL;
		} else {
			if (!pager) {
				for (i = 0; i < pageout_count; i++)
					pageout_status[i] = VM_PAGER_FAIL;
			} else {
				vm_pager_put_pages(pager, ms, pageout_count,
				    ((sync || (object == kernel_object)) ? TRUE : FALSE),
				    pageout_status);
			}
		}

	} else {
		for (i = 0; i < pageout_count; i++)
			pageout_status[i] = VM_PAGER_FAIL;
	}

	for (i = 0; i < pageout_count; i++) {
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			ms[i]->flags &= ~PG_LAUNDRY;
			++anyok;
			break;
		case VM_PAGER_PEND:
			ms[i]->flags &= ~PG_LAUNDRY;
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.
			 * Right now we essentially lose the
			 * changes by pretending it worked.
			 */
			ms[i]->flags &= ~PG_LAUNDRY;
			ms[i]->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then
			 * reactivate it so that it doesn't
			 * clog the inactive list.  (We will
			 * try paging it out again later).
			 */
			if (ms[i]->flags & PG_INACTIVE)
				vm_page_activate(ms[i]);
			break;
		case VM_PAGER_AGAIN:
			break;
		}


		/*
		 * If the operation is still going, leave
		 * the page busy to block all other accesses.
		 * Also, leave the paging in progress
		 * indicator set so that we don't attempt an
		 * object collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			PAGE_WAKEUP(ms[i]);
			if (--object->paging_in_progress == 0)
				wakeup((caddr_t) object);
			if ((ms[i]->flags & PG_REFERENCED) ||
			    pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
				ms[i]->flags &= ~PG_REFERENCED;
				if (ms[i]->flags & PG_INACTIVE)
					vm_page_activate(ms[i]);
			}
		}
	}
	return anyok;
}
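
/*
 * Note on the return value of vm_pageout_clean(): anyok counts the
 * pages whose pageout completed (VM_PAGER_OK) or was successfully
 * queued (VM_PAGER_PEND).  Because the routine clusters up to
 * vm_pageout_page_count contiguous pages around the one it was handed,
 * a single call can retire several laundry pages at once; the caller
 * in vm_pageout_scan() debits this count against maxlaunder.
 */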

/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	shadows.
 *
 *	The object and map must be locked.
 */
int
vm_pageout_object_deactivate_pages(map, object, count)
	vm_map_t map;
	vm_object_t object;
	int count;
{
	register vm_page_t p, next;
	int rcount;
	int s;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

	if (object->shadow) {
		int scount = count;
		if (object->shadow->ref_count > 1)
			scount /= object->shadow->ref_count;
		if (scount)
			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, scount);
	}

	if (object->paging_in_progress)
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		vm_page_lock_queues();
		/*
		 * if a page is active, not wired and is in the process's pmap,
		 * then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE &&
		    p->wire_count == 0 &&
		    p->hold_count == 0 &&
		    p->busy == 0 &&
		    pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
			    (p->flags & PG_REFERENCED) == 0) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we deactivate
				 */
				if (!p->act_count) {
					vm_page_deactivate(p);
					pmap_page_protect(VM_PAGE_TO_PHYS(p),
						VM_PROT_NONE);
				/*
				 * else if on the next go-around we will deactivate the page
				 * we need to place the page on the end of the queue to age
				 * the other pages in memory.
				 */
				} else {
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					TAILQ_REMOVE(&object->memq, p, listq);
					TAILQ_INSERT_TAIL(&object->memq, p, listq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						vm_page_unlock_queues();
						return dcount;
					}
				}

			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				p->flags &= ~PG_REFERENCED;
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				TAILQ_REMOVE(&object->memq, p, listq);
				TAILQ_INSERT_TAIL(&object->memq, p, listq);
			}
		}

		vm_page_unlock_queues();
		p = next;
	}
	return dcount;
}
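
/*
 * The recursion above splits the deactivation budget across a shadow
 * chain: a shadow object shared by several maps has its share divided
 * by its reference count, so no single caller deactivates more than
 * its proportion of a shared object's pages.  For example, with
 * count == 60 and a shadow whose ref_count is 3, the shadow level is
 * asked to deactivate at most 60 / 3 == 20 pages.
 */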

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	int (*freeer)(vm_map_t, vm_object_t, int);
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	if (*count <= 0)
		return;
	vm_map_reference(map);
	if (!lock_try_read(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		*count -= (*freeer)(map, obj, *count);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder;
	int pages_freed, free, nproc;
	int desired_free;
	vm_page_t next;
	struct proc *p;
	vm_object_t object;
	int s;
	int force_wakeup = 0;
	int cache_size, orig_cache_size;

	/*
	 * We manage the cached memory by attempting to keep it
	 * at about the desired level.
	 * We deactivate the pages for the oldest cached objects
	 * first.  This keeps pages that are "cached" from hogging
	 * physical memory.
	 */
	orig_cache_size = 0;
	object = vm_object_cached_list.tqh_first;

	/* calculate the total cached size */

	while (object) {
		orig_cache_size += object->resident_page_count;
		object = object->cached_list.tqe_next;
	}

redeact:
	cache_size = orig_cache_size;
	object = vm_object_cached_list.tqh_first;
	vm_object_cache_lock();
	while (object && (cnt.v_inactive_count < cnt.v_inactive_target) &&
	    (cache_size >= vm_desired_cache_size)) {
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_deactivate: I'm sooo confused.");

		/*
		 * if there are no resident pages -- get rid of the object
		 */
		if (object->resident_page_count == 0) {
			pager_cache(object, FALSE);
			goto redeact;
		} else {
			/*
			 * if there are resident pages -- deactivate them
			 */
			vm_object_deactivate_pages(object);
			cache_size -= object->resident_page_count;
			object = object->cached_list.tqe_next;
		}

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();

morefree:
	/*
	 * now check malloc area or swap processes out if we are in low
	 * memory conditions
	 */
	if (cnt.v_free_count <= cnt.v_free_min) {
		/*
		 * swap out inactive processes
		 */
		swapout_threads();
	}

	/*
	 * scan the processes for exceeding their rlimits or if process
	 * is swapped out -- deactivate pages
	 */

rescanproc1:
	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
		vm_offset_t size;
		int overage;
		quad_t limit;

		/*
		 * if this is a system process or a process that is
		 * exiting, skip it.
		 */
		if (p->p_flag & (P_SYSTEM|P_WEXIT)) {
			continue;
		}

		/*
		 * if the process is in a non-running type state,
		 * don't touch it.
		 */
		if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
			continue;
		}

		/*
		 * get a limit
		 */
		limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

		/*
		 * let processes that are swapped out really be swapped out
		 * by setting the limit to nothing (this will force a swap-out.)
		 */
		if ((p->p_flag & P_INMEM) == 0)
			limit = 0;

		size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
		if (limit > 0 && size >= limit) {
			overage = (size - limit) / NBPG;
			vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
			    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
		}
	}
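
	/*
	 * The overage computation above converts the resident-set excess
	 * back into pages: a process holding 8MB resident against a 6MB
	 * RLIMIT_RSS, with 4K pages, yields overage =
	 * (8MB - 6MB) / NBPG == 512 pages, and that many pages are asked
	 * to be deactivated out of the process's map.
	 */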

	if (((cnt.v_free_count + cnt.v_inactive_count) >=
	    (cnt.v_inactive_target + cnt.v_free_target)) &&
	    (cnt.v_free_count >= cnt.v_free_target))
		return force_wakeup;

	pages_freed = 0;
	desired_free = cnt.v_free_target;

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	maxlaunder = (cnt.v_free_target - cnt.v_free_count);
	maxscan = cnt.v_inactive_count;
rescan1:
	m = vm_page_queue_inactive.tqh_first;
	while (m && (maxscan-- > 0) &&
	    (cnt.v_free_count < desired_free)) {
		vm_page_t next;

		next = m->pageq.tqe_next;

		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?");
			m = next;
			continue;
		}

		/*
		 * activate held pages
		 */
		if (m->hold_count != 0) {
			vm_page_activate(m);
			m = next;
			continue;
		}

		/*
		 * don't mess with busy pages
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			m = next;
			continue;
		}

		/*
		 * if the page is clean but has been referenced,
		 * then reactivate the page, but if we are very low on memory
		 * or the page has not been referenced, then we free it to the
		 * vm system.
		 */
		if (m->flags & PG_CLEAN) {
			if ((cnt.v_free_count > vm_pageout_free_min)	/* XXX */
			    && ((pmap_is_referenced(VM_PAGE_TO_PHYS(m)) ||
			    (m->flags & PG_REFERENCED) != 0))) {
				m->flags &= ~PG_REFERENCED;
				vm_page_activate(m);
			} else if (!m->act_count) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
					VM_PROT_NONE);
				vm_page_free(m);
				++cnt.v_dfree;
				++pages_freed;
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			}
		} else if ((m->flags & PG_LAUNDRY) && maxlaunder > 0) {
			int written;
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m)) ||
			    ((m->flags & PG_REFERENCED) != 0)) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
				vm_page_activate(m);
				m->flags &= ~PG_REFERENCED;
				m = next;
				continue;
			}
			/*
			 *	If a page is dirty, then it is either
			 *	being washed (but not yet cleaned)
			 *	or it is still in the laundry.  If it is
			 *	still in the laundry, then we start the
			 *	cleaning operation.
			 */

			if ((written = vm_pageout_clean(m, 0)) != 0) {
				maxlaunder -= written;
			}
			if (!next)
				break;
			/*
			 * if the next page has been re-activated, start scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0)
				goto rescan1;
		} else if ((m->flags & PG_REFERENCED) ||
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			vm_page_activate(m);
		}
		m = next;
	}
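
	/*
	 * maxlaunder started as the raw free-page deficit
	 * (v_free_target - v_free_count).  Each vm_pageout_clean() call
	 * above debits it by the number of pages actually cleaned or
	 * queued, which may be a whole cluster rather than a single
	 * page, so the laundering pass stops once a deficit's worth of
	 * writes has been issued, even though some may still be pending.
	 */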

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count);

	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			if (cnt.v_free_count < cnt.v_free_min) {
				page_shortage = cnt.v_free_min - cnt.v_free_count;
			} else if (((cnt.v_free_count + cnt.v_inactive_count) <
			    (cnt.v_free_min + cnt.v_inactive_target))) {
				page_shortage = 1;
			} else {
				page_shortage = 0;
			}
		}

	}

	maxscan = cnt.v_active_count;
	m = vm_page_queue_active.tqh_first;
	while (m && maxscan-- && (page_shortage > 0)) {

		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) || (m->hold_count != 0)) {
			m = next;
			continue;
		}

		if ((m->flags & PG_REFERENCED) ||
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			if (m->act_count < ACT_MAX)
				m->act_count += ACT_ADVANCE;
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			TAILQ_REMOVE(&m->object->memq, m, listq);
			TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count) {
				vm_page_deactivate(m);
				--page_shortage;
			/*
			 * else if on the next go-around we will deactivate the page
			 * we need to place the page on the end of the queue to age
			 * the other pages in memory.
			 */
			} else {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				TAILQ_REMOVE(&m->object->memq, m, listq);
				TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
			}
		}
		m = next;
	}

	/*
	 * if we have not freed any pages and we are desperate for memory
	 * then we keep trying until we get some (any) memory.
	 */

	if (!force_wakeup && (swap_pager_full || !force_wakeup ||
	    (pages_freed == 0 && (cnt.v_free_count < cnt.v_free_min)))) {
		vm_pager_sync();
		force_wakeup = 1;
		goto morefree;
	}
	vm_page_pagesfreed += pages_freed;
	return force_wakeup;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
	extern int swiopend;
	static int nowakeup;
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

vmretry:
	cnt.v_free_min = 12;
	/*
	 * free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry
	 * structs when paging.
	 */
	cnt.v_free_reserved = 4 + cnt.v_page_count / 1024;
	if (cnt.v_free_min < 8)
		cnt.v_free_min = 8;
	if (cnt.v_free_min > 32)
		cnt.v_free_min = 32;
	vm_pageout_free_min = cnt.v_free_reserved;
	cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_inactive_target = cnt.v_free_count / 12;
	cnt.v_free_min += cnt.v_free_reserved;
	vm_desired_cache_size = cnt.v_page_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;
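
	/*
	 * For a concrete sense of these thresholds, take a machine with
	 * 4096 physical pages (16MB with 4K pages):
	 *
	 *	v_free_reserved       = 4 + 4096/1024	=    8 pages
	 *	v_free_target         = 2*12 + 8	=   32 pages
	 *	v_free_min            = 12 + 8		=   20 pages
	 *	vm_desired_cache_size = 4096/3		= 1365 pages
	 *
	 * v_inactive_target and vm_page_max_wired come to 1/12 and 1/3
	 * of whatever v_free_count is at this point in boot.
	 */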

	(void) swap_pager_alloc(0, 0, 0, 0);

	/*
	 *	The pageout daemon is never done, so loop
	 *	forever.
	 */
	while (TRUE) {
		int force_wakeup;
/*
		cnt.v_free_min = 12 + averunnable.ldavg[0] / 1024;
		cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved;
		cnt.v_inactive_target = cnt.v_free_target*2;
*/

		tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);

		vm_pager_sync();
		/*
		 * The force wakeup hack was added to eliminate delays and
		 * potential deadlock.  It was possible for the page daemon
		 * to indefinitely postpone waking up a process that it
		 * might be waiting for memory on.  The putmulti stuff seems
		 * to have aggravated the situation.
		 */
		force_wakeup = vm_pageout_scan();
		vm_pager_sync();
		if (force_wakeup)
			wakeup((caddr_t) &cnt.v_free_count);
		cnt.v_scan++;
		wakeup((caddr_t) kmem_map);
	}
}
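
/*
 * Wakeup protocol, as visible from this file: the daemon tsleep()s on
 * &vm_pages_needed, so the rest of the VM system is expected to
 * wakeup() that channel when it needs page-out service.  After a
 * forced scan the daemon in turn wakes sleepers on &cnt.v_free_count
 * (processes waiting for free pages) and on kmem_map (kernel map
 * allocations waiting for space).
 */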