vm_glue.c revision 114019
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 114019 2003-04-25 16:30:02Z alc $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}
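
/*
 * Usage sketch (hypothetical caller, for illustration only): a driver
 * that must validate a kernel-space buffer before touching it could do:
 *
 *	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * Per the warning above, a TRUE result proves only that the map entries
 * permit the access, not that the pages are resident.
 */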

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
 * be used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
void
vslock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}
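
/*
 * Usage sketch (hypothetical; sysctl's wiring of the user's old buffer
 * is a real instance of the pattern): wire a user range so that it stays
 * resident across an operation, then unwire it:
 *
 *	vslock(uaddr, len);
 *	error = copyin(uaddr, kbuf, len);
 *	...
 *	vsunlock(uaddr, len);
 *
 * The final FALSE argument to vm_map_wire()/vm_map_unwire() selects a
 * system wiring rather than a user (mlock(2)-style) wiring.
 */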

/*
 * Create the U area for a new process.
 * This routine directly affects the fork performance of a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate an object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit performance of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(int devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);

		object = p->p_upages_obj;
		if (object != NULL &&
		    swap_pager_isswapped(object, devidx)) {
			sx_sunlock(&allproc_lock);
			faultin(p);
			PROC_UNLOCK(p);
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			TAILQ_FOREACH(m, &object->memq, listq)
				vm_page_dirty(m);
			vm_page_unlock_queues();
			swap_pager_freespace(object, 0,
			    object->un_pager.swp.swp_bcount);
			VM_OBJECT_UNLOCK(object);
			goto retry;
		}

		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif
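
/*
 * Note on the caller of vm_proc_swapin_all(): this exists in support of
 * removing a swap device (the swapoff path); every U area with pages on
 * the departing device must be pulled back into memory, and the
 * dirty-and-free pass above ensures no swap metadata keeps pointing at
 * the device.  (That swapoff is the caller is inferred from the
 * function's contract, not stated in this file.)
 */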

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't
	 * need to share sigacts, so we use up->u_sigacts instead.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n",
			    p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned)((caddr_t)&up->u_stats.pstat_endzero -
	    (caddr_t)&up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	    (caddr_t)&up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}
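
/*
 * For reference, under the rfork(2) flag conventions (defined elsewhere,
 * not in this file): a plain fork(2) arrives here with RFPROC set and
 * RFMEM clear and so takes the vmspace_fork() copy-on-write path above,
 * while vfork(2) passes RFPROC|RFMEM, sharing a single vmspace and only
 * bumping vm_refcnt.
 */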

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	cpu_wait(p);
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be half of (reasonably) available memory.
	 * Since this is a soft limit, it comes into effect only when the
	 * system is out of memory - half of main memory helps to favor
	 * smaller processes, and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB (512 pages of 4KB) */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			pmap_swapin_thread(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * If this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);
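
	/*
	 * (Answering the question below: the only space check is the
	 * vm_page_count_min() test at the top of the loop; nothing here
	 * re-checks free memory immediately before the swapin.)
	 */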

	/*
	 * We would like to bring someone in.  (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
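
/*
 * Both thresholds are compared against kg_slptime below, which schedcpu
 * advances once per second, so the units are seconds.  They can be tuned
 * at run time, e.g. (values are illustrative only):
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=30
 */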

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		PROC_LOCK(p);
		if ((p->p_flag & P_SYSTEM) != 0) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		mtx_lock_spin(&sched_lock);
		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			goto nextproc;

		case PRS_NORMAL:
			/*
			 * Do not swapout a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		mtx_unlock_spin(&sched_lock);
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) ==
	    PS_INMEM, ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
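
/*
 * Note on callers: "driven by the pageout daemon" above refers to the
 * vm_daemon in vm_pageout.c, which is expected to pass VM_SWAP_NORMAL
 * under memory pressure and VM_SWAP_IDLE when the vm.swap_idle_enabled
 * sysctl is set (see vm_pageout.c; not established in this file).
 */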