vm_pageout.c revision 6580
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.35 1995/02/14 06:09:15 phk Exp $
 */

/*
 * The proverbial page-out daemon.
 */
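
/*
 * Structure of this file: vm_pageout() is the pageout daemon proper; it
 * sleeps on vm_pages_needed and calls vm_pageout_scan() to free and
 * launder pages from the inactive queue and to age pages on the active
 * queue.  vm_daemon() sleeps on vm_daemon_needed and handles the
 * heavier-weight work: calling swapout_threads(), enforcing per-process
 * RSS limits, and discarding cached objects with no resident pages.
 */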

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

extern vm_map_t kmem_map;
int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pagescanner;		/* Event on which pagescanner sleeps */

int vm_pageout_pages_needed = 0;	/* flag saying that the pageout daemon needs pages */
int vm_page_pagesfreed;

extern int npendingio;
int vm_pageout_proc_limit;
int vm_pageout_req_swapout;
int vm_daemon_needed;
extern int nswiodone;
extern int swap_pager_full;
extern int vm_swap_size;
extern int swap_pager_ready();

#define MAXSCAN 1024		/* maximum number of pages to scan in queues */

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
int vm_pageout_req_do_stats;

int vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 * vm_pageout_clean:
 *	cleans a vm_page
 */
int
vm_pageout_clean(m, sync)
	register vm_page_t m;
	int sync;
{
	/*
	 * Clean the page and remove it from the laundry.
	 *
	 * We set the busy bit to cause potential page faults on this page to
	 * block.
	 *
	 * And we set pageout-in-progress to keep the object from disappearing
	 * during pageout.  This guarantees that the page won't move from the
	 * inactive queue.  (However, any other page on the inactive queue may
	 * move!)
	 */

	register vm_object_t object;
	register vm_pager_t pager;
	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
	vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int anyok = 0;
	int i;
	vm_offset_t offset = m->offset;

	object = m->object;
	if (!object) {
		printf("pager: object missing\n");
		return 0;
	}
	if (!object->pager && (object->flags & OBJ_INTERNAL) == 0) {
		printf("pager: non internal obj without pager\n");
	}
	/*
	 * Try to collapse the object before making a pager for it.  We must
	 * unlock the page queues first.  We try to defer the creation of a
	 * pager until all shadows are not paging.  This allows
	 * vm_object_collapse to work better and helps control swap space
	 * size.  (J. Dyson 11 Nov 93)
	 */

	if (!object->pager &&
	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		return 0;

	if ((!sync && m->bmapped != 0 && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	if (!sync && object->shadow) {
		vm_object_collapse(object);
	}
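
	/*
	 * The code below builds a forward cluster of up to
	 * vm_pageout_page_count (default 8) pages so that a single pager
	 * request can write them all.  Starting at the target page's offset,
	 * vm_page_lookup() probes the object at offset + NBPG,
	 * offset + 2*NBPG, ..., and the cluster ends at the first page that
	 * is missing or ineligible (clean, busy, wired, held, or mapped into
	 * a buffer): one vm_pager_put_pages() call instead of up to eight
	 * separate ones.
	 */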
	pageout_count = 1;
	ms[0] = m;

	pager = object->pager;
	if (pager) {
		for (i = 1; i < vm_pageout_page_count; i++) {
			vm_page_t mt;

			ms[i] = mt = vm_page_lookup(object, offset + i * NBPG);
			if (mt) {
				vm_page_test_dirty(mt);
				/*
				 * We can cluster ONLY if the page is dirty
				 * (NOT clean) and is not wired, busy, held,
				 * or mapped into a buffer, and one of the
				 * following holds: 1) the page is inactive
				 * (or a seldom used active page), or 2) we
				 * force the issue.
				 */
				if ((mt->dirty & mt->valid) != 0
				    && (((mt->flags & (PG_BUSY | PG_INACTIVE)) == PG_INACTIVE)
					|| sync == VM_PAGEOUT_FORCE)
				    && (mt->wire_count == 0)
				    && (mt->busy == 0)
				    && (mt->hold_count == 0)
				    && (mt->bmapped == 0))
					pageout_count++;
				else
					break;
			} else
				break;
		}
		/*
		 * we allow reads during pageouts...
		 */
		for (i = 0; i < pageout_count; i++) {
			ms[i]->flags |= PG_BUSY;
			pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
		}
		object->paging_in_progress += pageout_count;
	} else {

		m->flags |= PG_BUSY;

		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);

		object->paging_in_progress++;

		pager = vm_pager_allocate(PG_DFLT, (caddr_t) 0,
		    object->size, VM_PROT_ALL, 0);
		if (pager != NULL) {
			vm_object_setpager(object, pager, 0, FALSE);
		}
	}

	/*
	 * If there is no pager for the page, use the default pager.  If
	 * there's no place to put the page at the moment, leave it in the
	 * laundry and hope that there will be paging space later.
	 */

	if ((pager && pager->pg_type == PG_SWAP) ||
	    (cnt.v_free_count + cnt.v_cache_count) >= cnt.v_pageout_free_min) {
		if (pageout_count == 1) {
			pageout_status[0] = pager ?
			    vm_pager_put(pager, m,
				((sync || (object == kernel_object)) ? TRUE : FALSE)) :
			    VM_PAGER_FAIL;
		} else {
			if (!pager) {
				for (i = 0; i < pageout_count; i++)
					pageout_status[i] = VM_PAGER_FAIL;
			} else {
				vm_pager_put_pages(pager, ms, pageout_count,
				    ((sync || (object == kernel_object)) ? TRUE : FALSE),
				    pageout_status);
			}
		}
	} else {
		for (i = 0; i < pageout_count; i++)
			pageout_status[i] = VM_PAGER_FAIL;
	}

	for (i = 0; i < pageout_count; i++) {
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
			ms[i]->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			if (ms[i]->flags & PG_INACTIVE)
				vm_page_activate(ms[i]);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			if (--object->paging_in_progress == 0)
				wakeup((caddr_t) object);
			if ((ms[i]->flags & (PG_REFERENCED|PG_WANTED)) ||
			    pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
				ms[i]->flags &= ~PG_REFERENCED;
				if (ms[i]->flags & PG_INACTIVE)
					vm_page_activate(ms[i]);
			}
			PAGE_WAKEUP(ms[i]);
		}
	}
	return anyok;
}
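
/*
 * A note on the act_count aging used below (the ACT_* constants come
 * from the VM headers; the exact values are tuning parameters): every
 * scan that finds a page unreferenced subtracts ACT_DECLINE from
 * act_count, and every observed reference adds ACT_ADVANCE, capped at
 * ACT_MAX.  A page therefore has to remain unreferenced across
 * consecutive scans until act_count drains to zero before it becomes a
 * candidate for deactivation.
 */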

/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements, or if vm_pageout_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	shadows.
 *
 *	The object and map must be locked.
 */
int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	int count;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

	if (object->pager && (object->pager->pg_type == PG_DEVICE))
		return 0;

	if (object->shadow) {
		if (object->shadow->ref_count == 1)
			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
		else
			vm_pageout_object_deactivate_pages(map, object->shadow, count, 1);
	}
	if (object->paging_in_progress || !vm_object_lock_try(object))
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		cnt.v_pdpages++;
		vm_page_lock_queues();
		if (p->wire_count != 0 ||
		    p->hold_count != 0 ||
		    p->bmapped != 0 ||
		    p->busy != 0 ||
		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			p = next;
			continue;
		}
		/*
		 * If a page is active, not wired and is in the process's
		 * pmap, then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
			    (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we
				 * deactivate
				 */
				if (!p->act_count) {
					if (!map_remove_only)
						vm_page_deactivate(p);
					pmap_page_protect(VM_PAGE_TO_PHYS(p),
					    VM_PROT_NONE);
				/*
				 * else, if on the next go-around we will
				 * deactivate the page, we need to place
				 * the page on the end of the queue to age
				 * the other pages in memory.
				 */
				} else {
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					TAILQ_REMOVE(&object->memq, p, listq);
					TAILQ_INSERT_TAIL(&object->memq, p, listq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						vm_page_unlock_queues();
						vm_object_unlock(object);
						return dcount;
					}
				}
			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				p->flags &= ~PG_REFERENCED;
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				TAILQ_REMOVE(&object->memq, p, listq);
				TAILQ_INSERT_TAIL(&object->memq, p, listq);
			}
		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p),
			    VM_PROT_NONE);
		}
		vm_page_unlock_queues();
		p = next;
	}
	vm_object_unlock(object);
	return dcount;
}
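
/*
 * Note on the shadow recursion in vm_pageout_object_deactivate_pages():
 * when a shadow object is privately referenced (ref_count == 1), the
 * deactivation budget passed down is count / 2 + 1, so a budget of 8
 * pages becomes 5, then 3, then 2 at successive shadow depths; deeper
 * backing objects absorb progressively smaller shares of the work.
 */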

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */

void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	int (*freeer) (vm_map_t, vm_object_t, int);
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (*count <= 0)
		return;
	vm_map_reference(map);
	if (!lock_try_read(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != 0) {
		*count -= (*freeer) (map, obj, *count);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

void
vm_req_vmdaemon()
{
	extern int ticks;
	static int lastrun = 0;

	if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
		wakeup((caddr_t) &vm_daemon_needed);
		lastrun = ticks;
	}
}
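
/*
 * The hz / 10 test above limits vm_daemon wakeups to roughly ten per
 * second; the "ticks < lastrun" clause re-arms the limiter if the ticks
 * counter has wrapped.
 */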

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder;
	int pages_freed;
	int desired_free;
	vm_page_t next;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;

	/* calculate the total cached size */

	if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_inactive_target + cnt.v_free_min)) {
		vm_req_vmdaemon();
	}
	/*
	 * now swap processes out if we are in low memory conditions
	 */
	if ((cnt.v_free_count <= cnt.v_free_min) &&
	    !swap_pager_full && vm_swap_size && vm_pageout_req_swapout == 0) {
		vm_pageout_req_swapout = 1;
		vm_req_vmdaemon();
	}
	pages_freed = 0;
	desired_free = cnt.v_free_target;

	/*
	 * Start scanning the inactive queue for pages we can free.  We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;

rescan1:
	maxscan = min(cnt.v_inactive_count, MAXSCAN);
	m = vm_page_queue_inactive.tqh_first;
	while (m && (maxscan-- > 0) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < desired_free)) {
		vm_page_t next;

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?\n");
			break;
		}
#endif

		/*
		 * don't mess with busy pages
		 */
		if (m->hold_count || m->busy || (m->flags & PG_BUSY) ||
		    m->bmapped != 0) {
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			m = next;
			continue;
		}
		if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			m->flags |= PG_REFERENCED;
		}
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		}
		if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			if (m->act_count < ACT_MAX)
				m->act_count += ACT_ADVANCE;
			m = next;
			continue;
		}
		vm_page_test_dirty(m);

		if ((m->dirty & m->valid) == 0) {
			if (m->valid == 0) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
				vm_page_free(m);
			} else if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
			    (cnt.v_cache_count < cnt.v_cache_min)) {
				vm_page_cache(m);
			}
		} else if (maxlaunder > 0) {
			int written;

			object = m->object;
			if ((object->flags & OBJ_DEAD) || !vm_object_lock_try(object)) {
				m = next;
				continue;
			}
			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);
			vm_object_unlock(object);

			if (!next) {
				break;
			}
			maxlaunder -= written;
			/*
			 * if the next page has been re-activated, start
			 * scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0) {
				goto rescan1;
			}
		}
		m = next;
	}
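
	/*
	 * The inactive scan above stops once desired_free is reached, the
	 * queue is exhausted, or MAXSCAN (1024) pages have been examined.
	 * maxlaunder, at most MAXLAUNDER (16 or 32 pages depending on
	 * machine size), bounds how many dirty pages are pushed to their
	 * pager per pass, limiting the write I/O a single scan can queue.
	 */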

	/*
	 * Compute the page shortage.  If we are still very low on memory, be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			if ((cnt.v_free_count + cnt.v_cache_count) < desired_free) {
				page_shortage =
				    desired_free - (cnt.v_free_count + cnt.v_cache_count);
			}
		}
		if ((page_shortage <= 0) && (cnt.v_free_count < cnt.v_free_min))
			page_shortage = 1;
	}
	maxscan = min(cnt.v_active_count, MAXSCAN);
	m = vm_page_queue_active.tqh_first;
	while (m && (maxscan-- > 0) && (page_shortage > 0)) {

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0) ||
		    (m->bmapped != 0)) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			m = next;
			continue;
		}
		if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
			int s;

			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			if (m->act_count < ACT_MAX) {
				m->act_count += ACT_ADVANCE;
			}
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			s = splhigh();
			TAILQ_REMOVE(&m->object->memq, m, listq);
			TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
			splx(s);
		} else {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count && (page_shortage > 0)) {
				if (m->object->ref_count == 0) {
					vm_page_test_dirty(m);
					--page_shortage;
					if ((m->dirty & m->valid) == 0) {
						m->act_count = 0;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
					}
				} else {
					vm_page_deactivate(m);
					--page_shortage;
				}
			} else if (m->act_count) {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			}
		}
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		m = vm_page_queue_cache.tqh_first;
		if (!m)
			break;
		vm_page_free(m);
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
			psignal(bigproc, SIGKILL);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup((caddr_t) &cnt.v_free_count);
		}
	}
	vm_page_pagesfreed += pages_freed;
	return force_wakeup;
}

/*
 * vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024;
	cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_inactive_target = cnt.v_free_count / 4;
	if (cnt.v_inactive_target > 512)
		cnt.v_inactive_target = 512;
	cnt.v_free_min += cnt.v_free_reserved;
	if (cnt.v_page_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 20;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;
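
	/*
	 * Worked example of the sizing above, for a hypothetical machine
	 * with 4096 4K pages (16MB): v_free_min = 4 + 3072/200 = 19,
	 * v_pageout_free_min = 6 + 4096/1024 = 10, v_free_reserved = 12,
	 * v_free_target = 3*19 + 12 = 69, and v_free_min is then raised by
	 * v_free_reserved to 31.  v_inactive_target and the cache limits
	 * depend on v_free_count at the time this runs.
	 */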

	(void) swap_pager_alloc(0, 0, 0, 0);
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
		cnt.v_pdwakeups++;
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup((caddr_t) &cnt.v_free_count);
		wakeup((caddr_t) kmem_map);
	}
}

void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep((caddr_t) &vm_daemon_needed, PUSER, "psleep", 0);
		swapout_threads();
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			int overage;
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out.)
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
			if (limit >= 0 && size >= limit) {
				overage = (size - limit) / NBPG;
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
			}
		}
	}

	/*
	 * we remove cached objects that have no RSS...
	 */
restart:
	vm_object_cache_lock();
	object = vm_object_cached_list.tqh_first;
	while (object) {
		vm_object_cache_unlock();
		/*
		 * if there are no resident pages -- get rid of the object
		 */
		if (object->resident_page_count == 0) {
			if (object != vm_object_lookup(object->pager))
				panic("vm_object_cache_trim: I'm sooo confused.");
			pager_cache(object, FALSE);
			goto restart;
		}
		object = object->cached_list.tqe_next;
		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}
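
/*
 * Illustrative RSS arithmetic for vm_daemon() above, with hypothetical
 * numbers: on a machine with 4K pages, a process holding 6MB resident
 * against a 4MB RLIMIT_RSS yields overage = (6MB - 4MB) / NBPG = 512,
 * so up to 512 pages are deactivated out of its map by
 * vm_pageout_object_deactivate_pages().
 */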