vm_pageout.c revision 5973
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.31 1995/01/24 10:13:58 davidg Exp $
 */

/*
 * The proverbial page-out daemon.
 */
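/*
 * Illustrative sketch (a hypothetical caller, not code from this file):
 * the daemon below sleeps on &vm_pages_needed, and page allocators
 * elsewhere in the VM system wake it when the free list runs short,
 * along the lines of:
 *
 *      if (cnt.v_free_count < cnt.v_free_min)
 *              wakeup((caddr_t) &vm_pages_needed);
 *
 * Conversely, once a scan has replenished the free list, vm_pageout()
 * wakes any memory waiters sleeping on &cnt.v_free_count.
 */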

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

extern vm_map_t kmem_map;
int vm_pages_needed;            /* Event on which pageout daemon sleeps */
int vm_pagescanner;             /* Event on which pagescanner sleeps */

int vm_pageout_pages_needed = 0;        /* flag saying that the pageout daemon needs pages */
int vm_page_pagesfreed;

extern int npendingio;
int vm_pageout_proc_limit;
int vm_pageout_req_swapout;
int vm_daemon_needed;
extern int nswiodone;
extern int swap_pager_full;
extern int vm_swap_size;
extern int swap_pager_ready();

#define MAXREF 32767

#define MAXSCAN 512             /* maximum number of pages to scan in active queue */
#define ACT_DECLINE     1
#define ACT_ADVANCE     3
#define ACT_MAX         100
#define MAXISCAN        256
#define MINTOFREE       6
#define MINFREE         2

#define MAXLAUNDER      (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
int vm_pageout_req_do_stats;

int vm_page_max_wired = 0;      /* XXX max # of wired pages system-wide */
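/*
 * Illustrative arithmetic for the tunables above, assuming 4K pages
 * (NBPG == 4096, as on the i386): a single clustered pageout covers up
 * to VM_PAGEOUT_PAGE_COUNT (8) contiguous pages, i.e.
 *
 *      vm_pageout_page_count * NBPG == 8 * 4096 == 32768 bytes
 *
 * of I/O per cluster, and dirty-page writes per inactive-queue scan are
 * budgeted by MAXLAUNDER (16, or 32 on machines with more than 1800
 * pages of memory).
 */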

/*
 * vm_pageout_clean:
 *      cleans a vm_page
 */
int
vm_pageout_clean(m, sync)
    register vm_page_t m;
    int sync;
{
    /*
     * Clean the page and remove it from the laundry.
     *
     * We set the busy bit to cause potential page faults on this page to
     * block.
     *
     * And we set pageout-in-progress to keep the object from disappearing
     * during pageout.  This guarantees that the page won't move from the
     * inactive queue.  (However, any other page on the inactive queue may
     * move!)
     */

    register vm_object_t object;
    register vm_pager_t pager;
    int pageout_status[VM_PAGEOUT_PAGE_COUNT];
    vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
    int pageout_count;
    int anyok = 0;
    int i;
    vm_offset_t offset = m->offset;

    object = m->object;
    if (!object) {
        printf("pager: object missing\n");
        return 0;
    }
    if (!object->pager && (object->flags & OBJ_INTERNAL) == 0) {
        printf("pager: non internal obj without pager\n");
    }
    /*
     * Try to collapse the object before making a pager for it.  We must
     * unlock the page queues first.  We try to defer the creation of a
     * pager until all shadows are not paging.  This allows
     * vm_object_collapse to work better and helps control swap space
     * size.  (J. Dyson 11 Nov 93)
     */

    if (!object->pager &&
        (cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
        return 0;

    if ((!sync && m->bmapped != 0 && m->hold_count != 0) ||
        ((m->busy != 0) || (m->flags & PG_BUSY)))
        return 0;

    if (!sync && object->shadow) {
        vm_object_collapse(object);
    }
    pageout_count = 1;
    ms[0] = m;

    pager = object->pager;
    if (pager) {
        for (i = 1; i < vm_pageout_page_count; i++) {
            vm_page_t mt;

            ms[i] = mt = vm_page_lookup(object, offset + i * NBPG);
            if (mt) {
                vm_page_test_dirty(mt);
                /*
                 * We can cluster ONLY if the page is dirty (NOT
                 * clean), is not wired, busy, held, or mapped
                 * into a buffer, and one of the following holds:
                 * 1) the page is inactive, or 2) we force the
                 * issue (sync == VM_PAGEOUT_FORCE).
                 */
                if ((mt->dirty & mt->valid) != 0
                    && (((mt->flags & (PG_BUSY | PG_INACTIVE)) == PG_INACTIVE)
                        || sync == VM_PAGEOUT_FORCE)
                    && (mt->wire_count == 0)
                    && (mt->busy == 0)
                    && (mt->hold_count == 0)
                    && (mt->bmapped == 0))
                    pageout_count++;
                else
                    break;
            } else
                break;
        }
        /*
         * we allow reads during pageouts...
         */
        for (i = 0; i < pageout_count; i++) {
            ms[i]->flags |= PG_BUSY;
            pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
        }
        object->paging_in_progress += pageout_count;
    } else {

        m->flags |= PG_BUSY;

        pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);

        object->paging_in_progress++;

        pager = vm_pager_allocate(PG_DFLT, (caddr_t) 0,
            object->size, VM_PROT_ALL, 0);
        if (pager != NULL) {
            vm_object_setpager(object, pager, 0, FALSE);
        }
    }

    /*
     * If there is no pager for the page, use the default pager.  If
     * there's no place to put the page at the moment, leave it in the
     * laundry and hope that there will be paging space later.
     */

    if ((pager && pager->pg_type == PG_SWAP) ||
        (cnt.v_free_count + cnt.v_cache_count) >= cnt.v_pageout_free_min) {
        if (pageout_count == 1) {
            pageout_status[0] = pager ?
                vm_pager_put(pager, m,
                    ((sync || (object == kernel_object)) ? TRUE : FALSE)) :
                VM_PAGER_FAIL;
        } else {
            if (!pager) {
                for (i = 0; i < pageout_count; i++)
                    pageout_status[i] = VM_PAGER_FAIL;
            } else {
                vm_pager_put_pages(pager, ms, pageout_count,
                    ((sync || (object == kernel_object)) ? TRUE : FALSE),
                    pageout_status);
            }
        }
    } else {
        for (i = 0; i < pageout_count; i++)
            pageout_status[i] = VM_PAGER_FAIL;
    }

    for (i = 0; i < pageout_count; i++) {
        switch (pageout_status[i]) {
        case VM_PAGER_OK:
            ++anyok;
            break;
        case VM_PAGER_PEND:
            ++anyok;
            break;
        case VM_PAGER_BAD:
            /*
             * Page outside of range of object.  Right now we
             * essentially lose the changes by pretending it
             * worked.
             */
            pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
            ms[i]->dirty = 0;
            break;
        case VM_PAGER_ERROR:
        case VM_PAGER_FAIL:
            /*
             * If the page couldn't be paged out, then reactivate
             * it so it doesn't clog the inactive list.  (We
             * will try paging it out again later.)
             */
            if (ms[i]->flags & PG_INACTIVE)
                vm_page_activate(ms[i]);
            break;
        case VM_PAGER_AGAIN:
            break;
        }

        /*
         * If the operation is still going, leave the page busy to
         * block all other accesses.  Also, leave the paging in
         * progress indicator set so that we don't attempt an object
         * collapse.
         */
        if (pageout_status[i] != VM_PAGER_PEND) {
            PAGE_WAKEUP(ms[i]);
            if (--object->paging_in_progress == 0)
                wakeup((caddr_t) object);
            if ((ms[i]->flags & PG_REFERENCED) ||
                pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
                pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
                ms[i]->flags &= ~PG_REFERENCED;
                if (ms[i]->flags & PG_INACTIVE)
                    vm_page_activate(ms[i]);
            }
        }
    }
    return anyok;
}
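/*
 * Usage sketch: the scan loop in vm_pageout_scan() below calls this with
 * sync == 0 and charges the return value (the number of pages for which
 * the pager reported VM_PAGER_OK or VM_PAGER_PEND) against its
 * laundering budget:
 *
 *      written = vm_pageout_clean(m, 0);
 *      maxlaunder -= written;
 *
 * The "is this page really dirty" test used throughout this file is the
 * expression (m->dirty & m->valid) != 0 -- a page only counts as dirty
 * in the bits that actually hold valid data.
 */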

/*
 *      vm_pageout_object_deactivate_pages
 *
 *      deactivate enough pages to satisfy the inactive target
 *      requirements or if vm_page_proc_limit is set, then
 *      deactivate all of the pages in the object and its
 *      shadows.
 *
 *      The object and map must be locked.
 */
int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
    vm_map_t map;
    vm_object_t object;
    int count;
    int map_remove_only;
{
    register vm_page_t p, next;
    int rcount;
    int dcount;

    dcount = 0;
    if (count == 0)
        count = 1;

    if (object->pager && (object->pager->pg_type == PG_DEVICE))
        return 0;

    if (object->shadow) {
        if (object->shadow->ref_count == 1)
            dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
        else
            vm_pageout_object_deactivate_pages(map, object->shadow, count, 1);
    }
    if (object->paging_in_progress || !vm_object_lock_try(object))
        return dcount;

    /*
     * scan the object's entire memory queue
     */
    rcount = object->resident_page_count;
    p = object->memq.tqh_first;
    while (p && (rcount-- > 0)) {
        next = p->listq.tqe_next;
        cnt.v_pdpages++;
        vm_page_lock_queues();
        if (p->wire_count != 0 ||
            p->hold_count != 0 ||
            p->bmapped != 0 ||
            p->busy != 0 ||
            !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
            p = next;
            continue;
        }
        /*
         * if a page is active, not wired and is in the process's
         * pmap, then deactivate the page.
         */
        if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
            if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
                (p->flags & PG_REFERENCED) == 0) {
                p->act_count -= min(p->act_count, ACT_DECLINE);
                /*
                 * if the page act_count is zero -- then we
                 * deactivate
                 */
                if (!p->act_count) {
                    if (!map_remove_only)
                        vm_page_deactivate(p);
                    pmap_page_protect(VM_PAGE_TO_PHYS(p),
                        VM_PROT_NONE);
                /*
                 * otherwise we will deactivate the page on a
                 * later go-around; for now, place the page at
                 * the end of the queue to age the other pages
                 * in memory.
                 */
                } else {
                    TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                    TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                    TAILQ_REMOVE(&object->memq, p, listq);
                    TAILQ_INSERT_TAIL(&object->memq, p, listq);
                }
                /*
                 * see if we are done yet
                 */
                if (p->flags & PG_INACTIVE) {
                    --count;
                    ++dcount;
                    if (count <= 0 &&
                        cnt.v_inactive_count > cnt.v_inactive_target) {
                        vm_page_unlock_queues();
                        vm_object_unlock(object);
                        return dcount;
                    }
                }
            } else {
                /*
                 * Move the page to the bottom of the queue.
                 */
                pmap_clear_reference(VM_PAGE_TO_PHYS(p));
                p->flags &= ~PG_REFERENCED;
                if (p->act_count < ACT_MAX)
                    p->act_count += ACT_ADVANCE;

                TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                TAILQ_REMOVE(&object->memq, p, listq);
                TAILQ_INSERT_TAIL(&object->memq, p, listq);
            }
        } else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
            pmap_page_protect(VM_PAGE_TO_PHYS(p),
                VM_PROT_NONE);
        }
        vm_page_unlock_queues();
        p = next;
    }
    vm_object_unlock(object);
    return dcount;
}
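/*
 * The act_count aging scheme above (and in vm_pageout_scan below) is a
 * bounded counter: each observed reference adds ACT_ADVANCE (3), capped
 * at ACT_MAX (100), and each unreferenced pass subtracts ACT_DECLINE (1);
 * a page becomes a deactivation candidate when the counter hits zero.
 * So a page referenced once and then left idle survives roughly
 * ACT_ADVANCE scan passes before it is deactivated.  In sketch form
 * ("referenced" stands in for the PG_REFERENCED/pmap_is_referenced
 * checks; illustrative only, never compiled):
 */
#if 0
    if (referenced) {
        if (p->act_count < ACT_MAX)
            p->act_count += ACT_ADVANCE;
    } else {
        p->act_count -= min(p->act_count, ACT_DECLINE);
        if (p->act_count == 0)
            vm_page_deactivate(p);
    }
#endif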

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
    vm_map_t map;
    vm_map_entry_t entry;
    int *count;
    int (*freeer) (vm_map_t, vm_object_t, int);
{
    vm_map_t tmpm;
    vm_map_entry_t tmpe;
    vm_object_t obj;

    if (*count <= 0)
        return;
    vm_map_reference(map);
    if (!lock_try_read(&map->lock)) {
        vm_map_deallocate(map);
        return;
    }
    if (entry == 0) {
        tmpe = map->header.next;
        while (tmpe != &map->header && *count > 0) {
            vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
            tmpe = tmpe->next;
        }
    } else if (entry->is_sub_map || entry->is_a_map) {
        tmpm = entry->object.share_map;
        tmpe = tmpm->header.next;
        while (tmpe != &tmpm->header && *count > 0) {
            vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
            tmpe = tmpe->next;
        }
    } else if ((obj = entry->object.vm_object) != 0) {
        *count -= (*freeer) (map, obj, *count);
    }
    lock_read_done(&map->lock);
    vm_map_deallocate(map);
    return;
}

void
vm_req_vmdaemon()
{
    extern int ticks;
    static int lastrun = 0;

    if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
        wakeup((caddr_t) &vm_daemon_needed);
        lastrun = ticks;
    }
}

void
vm_pageout_inactive_stats(int maxiscan)
{
    vm_page_t m;

    if (maxiscan > cnt.v_inactive_count)
        maxiscan = cnt.v_inactive_count;
    m = vm_page_queue_inactive.tqh_first;
    while (m && (maxiscan-- > 0)) {
        vm_page_t next;

        next = m->pageq.tqe_next;

        if (((m->flags & PG_REFERENCED) == 0) &&
            pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
            m->flags |= PG_REFERENCED;
        }
        if (m->object->ref_count == 0) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
        }
        if (m->flags & PG_REFERENCED) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
            vm_page_activate(m);
            /*
             * heuristic alert -- if a page is being re-activated,
             * it probably will be used one more time...
             */
            if (m->act_count < ACT_MAX)
                m->act_count += ACT_ADVANCE;
        }
        m = next;
    }
}
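/*
 * vm_req_vmdaemon() above throttles daemon wakeups to at most one per
 * hz/10 ticks; with the conventional hz == 100 (an assumption, not set
 * in this file) that is one wakeup per 100ms.  The (ticks < lastrun)
 * test re-arms the throttle if the tick counter ever wraps.
 * Illustrative timeline, assuming hz == 100:
 *
 *      ticks = 1000: wakeup, lastrun = 1000
 *      ticks = 1005: 1005 <= 1000 + 10, suppressed
 *      ticks = 1011: 1011 >  1000 + 10, wakeup, lastrun = 1011
 */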

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
    vm_page_t m;
    int page_shortage, maxscan, maxlaunder;
    int pages_freed;
    int desired_free;
    vm_page_t next;
    struct proc *p, *bigproc;
    vm_offset_t size, bigsize;
    vm_object_t object;
    int force_wakeup = 0;
    int minscan;
    int mintofree;

#ifdef LFS
    lfs_reclaim_buffers();
#endif

    if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
        (cnt.v_inactive_target + cnt.v_free_min)) {
        vm_req_vmdaemon();
    }
    /*
     * now swap processes out if we are in low memory conditions
     */
    if ((cnt.v_free_count <= cnt.v_free_min) &&
        !swap_pager_full && vm_swap_size && vm_pageout_req_swapout == 0) {
        vm_pageout_req_swapout = 1;
        vm_req_vmdaemon();
    }
    pages_freed = 0;
    desired_free = cnt.v_free_target;

    /*
     * Start scanning the inactive queue for pages we can free.  We keep
     * scanning until we have enough free pages or we have scanned through
     * the entire queue.  If we encounter dirty pages, we start cleaning
     * them.
     */

rescan0:
    vm_pageout_inactive_stats(MAXISCAN);
    maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
        MAXLAUNDER : cnt.v_inactive_target;

rescan1:
    maxscan = cnt.v_inactive_count;
    mintofree = MINTOFREE;
    m = vm_page_queue_inactive.tqh_first;
    while (m &&
        (maxscan-- > 0) &&
        (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
            (--mintofree > 0))) {
        vm_page_t next;

        cnt.v_pdpages++;
        next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
        if ((m->flags & PG_INACTIVE) == 0) {
            printf("vm_pageout_scan: page not inactive?\n");
            break;
        }
#endif

        /*
         * don't mess with busy pages
         */
        if (m->hold_count || m->busy || (m->flags & PG_BUSY) ||
            m->bmapped != 0) {
            TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
            m = next;
            continue;
        }
        if (((m->flags & PG_REFERENCED) == 0) &&
            pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
            m->flags |= PG_REFERENCED;
        }
        if (m->object->ref_count == 0) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
        }
        if ((m->flags & PG_REFERENCED) != 0) {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
            vm_page_activate(m);
            if (m->act_count < ACT_MAX)
                m->act_count += ACT_ADVANCE;
            m = next;
            continue;
        }
        vm_page_test_dirty(m);

        if ((m->dirty & m->valid) == 0) {
            if (m->valid == 0) {
                pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
                vm_page_free(m);
            } else if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
                (cnt.v_cache_count < cnt.v_cache_min)) {
                vm_page_cache(m);
            }
        } else if (maxlaunder > 0) {
            int written;

            TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);

            object = m->object;
            if (!vm_object_lock_try(object)) {
                m = next;
                continue;
            }
            /*
             * If a page is dirty, then it is either being washed
             * (but not yet cleaned) or it is still in the
             * laundry.  If it is still in the laundry, then we
             * start the cleaning operation.
             */
            written = vm_pageout_clean(m, 0);
            vm_object_unlock(object);

            if (!next) {
                break;
            }
            maxlaunder -= written;
            /*
             * if the next page has been re-activated, start
             * scanning again
             */
            if ((next->flags & PG_INACTIVE) == 0) {
                goto rescan1;
            }
        } else {
            TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
        }
        m = next;
    }

    /*
     * Compute the page shortage.  If we are still very low on memory, be
     * sure that we will move a minimal number of pages from active to
     * inactive.
     */

    page_shortage = cnt.v_inactive_target -
        (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
    if (page_shortage <= 0) {
        if (pages_freed == 0) {
            if ((cnt.v_free_count + cnt.v_cache_count) < desired_free) {
                page_shortage =
                    desired_free - (cnt.v_free_count + cnt.v_cache_count);
            }
        }
        if ((page_shortage <= 0) && (cnt.v_free_count < cnt.v_free_min))
            page_shortage = 1;
    }
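    /*
     * Worked example of the shortage computation above (numbers are
     * illustrative): with v_inactive_target = 512 and 100 free + 300
     * inactive + 50 cache pages, page_shortage = 512 - 450 = 62, so the
     * active-queue scan below tries to deactivate 62 pages.  If the
     * target is already met but nothing was freed and the free list is
     * below v_free_min, the shortage is forced to at least 1 so some
     * aging still happens.
     */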
    maxscan = cnt.v_active_count;
    minscan = cnt.v_active_count;
    if (minscan > MAXSCAN)
        minscan = MAXSCAN;
    m = vm_page_queue_active.tqh_first;
    while (m && ((maxscan > 0 && (page_shortage > 0)) || minscan > 0)) {
        if (maxscan)
            --maxscan;
        if (minscan)
            --minscan;

        cnt.v_pdpages++;
        next = m->pageq.tqe_next;

        /*
         * Don't deactivate pages that are busy.
         */
        if ((m->busy != 0) ||
            (m->flags & PG_BUSY) ||
            (m->hold_count != 0) ||
            (m->bmapped != 0)) {
            TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
            m = next;
            continue;
        }
        if (m->object->ref_count && ((m->flags & PG_REFERENCED) ||
                pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
            int s;

            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
            m->flags &= ~PG_REFERENCED;
            if (m->act_count < ACT_MAX) {
                m->act_count += ACT_ADVANCE;
            }
            TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
            TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
            s = splhigh();
            TAILQ_REMOVE(&m->object->memq, m, listq);
            TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
            splx(s);
        } else {
            m->flags &= ~PG_REFERENCED;
            pmap_clear_reference(VM_PAGE_TO_PHYS(m));
            m->act_count -= min(m->act_count, ACT_DECLINE);

            /*
             * if the page act_count is zero -- then we deactivate
             */
            if (!m->act_count && (page_shortage > 0)) {
                if (m->object->ref_count == 0) {
                    vm_page_test_dirty(m);
                    --page_shortage;
                    if ((m->dirty & m->valid) == 0) {
                        m->act_count = 0;
                        vm_page_cache(m);
                    } else {
                        vm_page_deactivate(m);
                    }
                } else {
                    vm_page_deactivate(m);
                    --page_shortage;
                }
            } else if (m->act_count) {
                TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
                TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
            }
        }
        m = next;
    }

    /*
     * We try to maintain some *really* free pages; this allows interrupt
     * code to be guaranteed space.
     */
    while (cnt.v_free_count < cnt.v_free_min) {
        m = vm_page_queue_cache.tqh_first;
        if (!m)
            break;
        vm_page_free(m);
    }

    /*
     * make sure that we have swap space -- if we are low on memory and
     * swap -- then kill the biggest process.
     */
    if ((vm_swap_size == 0 || swap_pager_full) &&
        ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
        bigproc = NULL;
        bigsize = 0;
        for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
            /*
             * if this is a system process, skip it
             */
            if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
                ((p->p_pid < 48) && (vm_swap_size != 0))) {
                continue;
            }
            /*
             * if the process is in a non-running type state,
             * don't touch it.
             */
            if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                continue;
            }
            /*
             * get the process size
             */
            size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
            /*
             * if this process is bigger than the biggest one,
             * remember it.
             */
            if (size > bigsize) {
                bigproc = p;
                bigsize = size;
            }
        }
        if (bigproc != NULL) {
            printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
            psignal(bigproc, SIGKILL);
            bigproc->p_estcpu = 0;
            bigproc->p_nice = PRIO_MIN;
            resetpriority(bigproc);
            wakeup((caddr_t) &cnt.v_free_count);
        }
    }
    vm_page_pagesfreed += pages_freed;
    return force_wakeup;
}
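/*
 * Victim selection above is by resident set size alone: resident_count
 * is measured in pages, so assuming 4K pages a process holding 2000
 * resident pages (~8MB) outranks one holding 500 (~2MB) regardless of
 * priority or age.  PID 1 and, while any swap remains, PIDs below 48
 * are exempt, so init and early system daemons survive.
 */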

/*
 * vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
    (void) spl0();

    /*
     * Initialize some paging parameters.
     */

    if (cnt.v_page_count > 1024)
        cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
    else
        cnt.v_free_min = 4;
    /*
     * free_reserved needs to include enough for the largest swap pager
     * structures plus enough for any pv_entry structs when paging.
     */
    cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024;
    cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
    cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
    cnt.v_inactive_target = cnt.v_free_count / 4;
    if (cnt.v_inactive_target > 512)
        cnt.v_inactive_target = 512;
    cnt.v_free_min += cnt.v_free_reserved;
    if (cnt.v_page_count > 1024) {
        cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
        cnt.v_cache_min = (cnt.v_free_count - 1024) / 20;
    } else {
        cnt.v_cache_min = 0;
        cnt.v_cache_max = 0;
    }

    /* XXX does not really belong here */
    if (vm_page_max_wired == 0)
        vm_page_max_wired = cnt.v_free_count / 3;

    (void) swap_pager_alloc(0, 0, 0, 0);
    /*
     * The pageout daemon is never done, so loop forever.
     */
    while (TRUE) {
        tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
        cnt.v_pdwakeups++;
        vm_pager_sync();
        vm_pageout_scan();
        vm_pager_sync();
        wakeup((caddr_t) &cnt.v_free_count);
        wakeup((caddr_t) kmem_map);
    }
}

void
vm_daemon()
{
    vm_object_t object;
    struct proc *p;

    while (TRUE) {
        tsleep((caddr_t) &vm_daemon_needed, PUSER, "psleep", 0);
        swapout_threads();
        /*
         * scan the processes: deactivate pages for any process that
         * exceeds its rlimits or is swapped out
         */

        for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
            int overage;
            quad_t limit;
            vm_offset_t size;

            /*
             * if this is a system process or if we have already
             * looked at this process, skip it.
             */
            if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
                continue;
            }
            /*
             * if the process is in a non-running type state,
             * don't touch it.
             */
            if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                continue;
            }
            /*
             * get a limit
             */
            limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
                p->p_rlimit[RLIMIT_RSS].rlim_max);

            /*
             * let processes that are swapped out really be
             * swapped out: set the limit to nothing (this will
             * force a swap-out.)
             */
            if ((p->p_flag & P_INMEM) == 0)
                limit = 0;      /* XXX */

            size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
            if (limit >= 0 && size >= limit) {
                overage = (size - limit) / NBPG;
                vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
                    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
            }
        }
    }

    /*
     * we remove cached objects that have no RSS...
     */
restart:
    vm_object_cache_lock();
    object = vm_object_cached_list.tqh_first;
    while (object) {
        vm_object_cache_unlock();
        /*
         * if there are no resident pages -- get rid of the object
         */
        if (object->resident_page_count == 0) {
            if (object != vm_object_lookup(object->pager))
                panic("vm_object_cache_trim: I'm sooo confused.");
            pager_cache(object, FALSE);
            goto restart;
        }
        object = object->cached_list.tqe_next;
        vm_object_cache_lock();
    }
    vm_object_cache_unlock();
}
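/*
 * Worked example of the startup tuning in vm_pageout() above, for a
 * hypothetical 16MB machine with 4K pages (v_page_count == 4096,
 * v_free_count close to that at boot):
 *
 *      v_free_min          = 4 + (4096 - 1024) / 200   = 19
 *      v_pageout_free_min  = 6 + 4096 / 1024           = 10
 *      v_free_reserved     = 10 + 2                    = 12
 *      v_free_target       = 3 * 19 + 12               = 69
 *      v_inactive_target   = min(v_free_count / 4, 512)
 *      v_free_min (final)  = 19 + 12                   = 31
 *
 * so the daemon starts reclaiming when free memory drops to roughly 31
 * pages (~124KB) and scans until about 69 pages (~276KB) are free.
 */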