vm_glue.c revision 103123
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 103123 2002-09-09 09:05:06Z tanimura $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}
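
/*
 * Usage sketch (added commentary, not part of the original source):
 * vslock()/vsunlock() are a symmetric pair.  A caller that must touch a
 * user buffer from a context where page faults are unwelcome would first
 * validate the range and then wire it for the duration of the access.
 * "uaddr", "ulen" and "kbuf" below are hypothetical names:
 *
 *	if (!useracc(uaddr, ulen, VM_PROT_READ))
 *		return (EFAULT);
 *	vslock(uaddr, ulen);
 *	error = copyin(uaddr, kbuf, ulen);
 *	vsunlock(uaddr, ulen);
 */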

/*
 * Create the U area for a new process.
 * This routine directly affects the fork performance of a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit performance of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}
#endif
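
/*
 * Added commentary (not in the original source): vm_proc_new(),
 * vm_proc_dispose() and the swapout/swapin pair above all share one
 * idiom: the U-area pages are owned by a per-process VM object
 * (p_upages_obj), and the pages are made visible in kernel virtual
 * memory with pmap_qenter()/pmap_qremove().  A minimal sketch of the
 * idiom, assuming a hypothetical object "obj" and a page array "ma"
 * of "npages" entries:
 *
 *	kva = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
 *	for (i = 0; i < npages; i++)
 *		ma[i] = vm_page_grab(obj, i,
 *		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
 *	pmap_qenter(kva, ma, npages);
 *	...
 *	pmap_qremove(kva, npages);
 */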

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't need
	 * to share sigacts, so we use the up->u_sigacts.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n",
			    p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
	    (caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
	    (caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	cpu_wait(p);
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}
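
/*
 * Added commentary (derived from the code above, not in the original
 * source): the rfork(2) flags passed to vm_forkproc() select how the
 * address space is handled:
 *
 *	RFPROC|RFMEM	the child shares the parent's vmspace; only
 *			vm_refcnt is bumped (as for kernel threads).
 *	RFPROC		the child receives a copy-on-write copy of the
 *			parent's vmspace via vmspace_fork().
 *	no RFPROC	no new process is created; without RFMEM a
 *			shared vmspace is unshared in place through
 *			vmspace_unshare().
 */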

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
#ifdef NO_SWAPPING
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else
	if ((p->p_sflag & PS_INMEM) == 0) {
		struct thread *td;

		++p->p_lock;
		/*
		 * If another process is swapping in this process,
		 * just wait until it finishes.
		 */
		if (p->p_sflag & PS_SWAPPINGIN) {
			mtx_unlock_spin(&sched_lock);
			msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
			mtx_lock_spin(&sched_lock);
			--p->p_lock;
			return;
		}

		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC (p, td)
			pmap_swapin_thread(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC (p, td)
			if (td->td_state == TDS_SWAPPED)
				setrunqueue(td);

		wakeup(&p->p_sflag);

		/* undo the effect of setting SLOCK above */
		--p->p_lock;
	}
#endif
}
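
/*
 * Added commentary (not in the original source): faultin() uses
 * PS_SWAPPINGIN as a hand-over-hand flag so that only one thread
 * actually performs the swapin; latecomers msleep() on &p->p_sflag
 * until the winner sets PS_INMEM and calls wakeup().  p_lock is
 * incremented across the operation to keep the process from being
 * swapped back out (or reaped) underneath us.
 */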

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPING | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * A runnable thread of a process swapped out is in
			 * TDS_SWAPPED.
			 */
			if (td->td_state == TDS_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * If this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		goto loop;
	}

	p->p_sflag &= ~PS_SWAPINREQ;

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");
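
/*
 * Tuning sketch (added commentary, not part of the original source):
 * both thresholds are exported read-write under the vm sysctl tree,
 * so they can be adjusted at runtime, for example:
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=30
 */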

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Do not swapout a process that is waiting for VM data
		 * structures; there is a possible deadlock.  Test this
		 * first as it may block.
		 *
		 * Lock the map until swapout finishes, or a thread of
		 * this process may attempt to alter the map.
		 *
		 * Watch out for a process in creation.  It may have no
		 * address space yet.
		 *
		 * An aio daemon switches its address space while running.
		 * Perform a quick check whether a process has P_SYSTEM.
		 */
		PROC_LOCK(p);
		if ((p->p_flag & P_SYSTEM) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;
		}
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			goto nextproc;

		case PRS_NORMAL:
			/*
			 * Do not swapout a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;

				/*
				 * swapout() unlocks a proc lock.  This is
				 * ugly, but avoids a superfluous lock.
				 */
				mtx_unlock_spin(&sched_lock);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		mtx_unlock_spin(&sched_lock);
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC (p, td)
		if (td->td_state == TDS_RUNQ) {	/* XXXKSE */
			remrunqueue(td);	/* XXXKSE */
			td->td_state = TDS_SWAPPED;
		}
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
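
/*
 * Added commentary (derived from the code above, not in the original
 * source): summary of the swap state machine implemented in this file.
 *
 *	PS_INMEM		process resident; the normal state.
 *	PS_SWAPPING		swapout() in progress; cleared once the
 *				U area and thread stacks are paged out.
 *	PS_SWAPPINGIN		faultin() in progress; other threads
 *				sleep on &p->p_sflag until it clears.
 *	TDS_SWAPPED		(per thread) runnable but swapped out;
 *				setrunqueue() is called for each such
 *				thread once PS_INMEM is set again.
 */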