vm_glue.c revision 92475
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
5111397Sswallace * 5211397Sswallace * Carnegie Mellon requests users of this software to return to 5311397Sswallace * 5411394Sswallace * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 5554655Seivind * School of Computer Science 5692761Salfred * Carnegie Mellon University 5792761Salfred * Pittsburgh PA 15213-3890 5811394Sswallace * 5911394Sswallace * any improvements or extensions that they make and grant Carnegie the 6011394Sswallace * rights to redistribute these changes. 6111397Sswallace * 6211394Sswallace * $FreeBSD: head/sys/vm/vm_glue.c 92475 2002-03-17 07:01:42Z alc $ 6311394Sswallace */ 6411394Sswallace 6511397Sswallace#include "opt_vm.h" 6611397Sswallace 6711394Sswallace#include <sys/param.h> 6811397Sswallace#include <sys/systm.h> 6911397Sswallace#include <sys/lock.h> 7011397Sswallace#include <sys/mutex.h> 7111394Sswallace#include <sys/proc.h> 7211397Sswallace#include <sys/resourcevar.h> 7311397Sswallace#include <sys/shm.h> 7411397Sswallace#include <sys/vmmeter.h> 7511397Sswallace#include <sys/sx.h> 76205792Sed#include <sys/sysctl.h> 77205792Sed 78205792Sed#include <sys/kernel.h> 7911394Sswallace#include <sys/ktr.h> 8011394Sswallace#include <sys/unistd.h> 8111394Sswallace 8211394Sswallace#include <machine/limits.h> 8311394Sswallace 8411394Sswallace#include <vm/vm.h> 8511394Sswallace#include <vm/vm_param.h> 8611394Sswallace#include <vm/pmap.h> 8711394Sswallace#include <vm/vm_map.h> 8811394Sswallace#include <vm/vm_page.h> 89118754Snectar#include <vm/vm_pageout.h> 90118754Snectar#include <vm/vm_kern.h> 91118754Snectar#include <vm/vm_extern.h> 92118754Snectar 9311394Sswallace#include <sys/user.h> 9411394Sswallace 9511394Sswallaceextern int maxslp; 9611394Sswallace 9711394Sswallace/* 9811394Sswallace * System initialization 9911394Sswallace * 10011394Sswallace * Note: proc0 from proc.h 10111394Sswallace */ 10211394Sswallacestatic void vm_init_limits __P((void *)); 10311394SswallaceSYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, 
vm_init_limits, &proc0) 10411394Sswallace 10511394Sswallace/* 10611394Sswallace * THIS MUST BE THE LAST INITIALIZATION ITEM!!! 10783366Sjulian * 10883366Sjulian * Note: run scheduling should be divorced from the vm system. 10911394Sswallace */ 11011394Sswallacestatic void scheduler __P((void *)); 111141488SjhbSYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL) 112141488Sjhb 11311394Sswallace#ifndef NO_SWAPPING 11411394Sswallacestatic void swapout __P((struct proc *)); 115141488Sjhb#endif 116141488Sjhb 117141488Sjhbint 118141488Sjhbkernacc(addr, len, rw) 11911394Sswallace caddr_t addr; 120141488Sjhb int len, rw; 12111394Sswallace{ 12211394Sswallace boolean_t rv; 12311394Sswallace vm_offset_t saddr, eaddr; 12483366Sjulian vm_prot_t prot; 12583366Sjulian 12611394Sswallace KASSERT((rw & ~VM_PROT_ALL) == 0, 12711394Sswallace ("illegal ``rw'' argument to kernacc (%x)\n", rw)); 128141488Sjhb prot = rw; 12911394Sswallace saddr = trunc_page((vm_offset_t)addr); 13011394Sswallace eaddr = round_page((vm_offset_t)addr + len); 131141488Sjhb rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot); 132141488Sjhb return (rv == TRUE); 13311394Sswallace} 134141488Sjhb 13511394Sswallaceint 13611394Sswallaceuseracc(addr, len, rw) 13711394Sswallace caddr_t addr; 13883366Sjulian int len, rw; 13983366Sjulian{ 14011394Sswallace boolean_t rv; 14111394Sswallace vm_prot_t prot; 142141488Sjhb 14311397Sswallace GIANT_REQUIRED; 144141488Sjhb 14511394Sswallace KASSERT((rw & ~VM_PROT_ALL) == 0, 14611394Sswallace ("illegal ``rw'' argument to useracc (%x)\n", rw)); 147141488Sjhb prot = rw; 14811397Sswallace /* 149274476Skib * XXX - check separately to disallow access to user area and user 150141488Sjhb * page tables - they are in the map. 151141488Sjhb * 152141488Sjhb * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was once 15311394Sswallace * only used (as an end address) in trap.c. Use it as an end address 154107849Salfred * here too. 
This bogusness has spread. I just fixed where it was 15511394Sswallace * used as a max in vm_mmap.c. 15611394Sswallace */ 15711394Sswallace if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS 15811394Sswallace || (vm_offset_t) addr + len < (vm_offset_t) addr) { 15983366Sjulian return (FALSE); 16083366Sjulian } 16111394Sswallace rv = vm_map_check_protection(&curproc->p_vmspace->vm_map, 16211394Sswallace trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), 163141488Sjhb prot); 16411397Sswallace return (rv == TRUE); 165141488Sjhb} 16611394Sswallace 16711394Sswallacevoid 168141488Sjhbvslock(addr, len) 16911397Sswallace caddr_t addr; 170274476Skib u_int len; 171274476Skib{ 172141488Sjhb GIANT_REQUIRED; 173141488Sjhb vm_map_pageable(&curproc->p_vmspace->vm_map, 174141488Sjhb trunc_page((vm_offset_t)addr), 17511394Sswallace round_page((vm_offset_t)addr + len), FALSE); 176107849Salfred} 17711394Sswallace 17811394Sswallacevoid 17911394Sswallacevsunlock(addr, len) 18011394Sswallace caddr_t addr; 18183366Sjulian u_int len; 18283366Sjulian{ 18311394Sswallace GIANT_REQUIRED; 18411394Sswallace vm_map_pageable(&curproc->p_vmspace->vm_map, 185141488Sjhb trunc_page((vm_offset_t)addr), 18611397Sswallace round_page((vm_offset_t)addr + len), TRUE); 18711394Sswallace} 18811394Sswallace 189141488Sjhb/* 190141488Sjhb * Implement fork's actions on an address space. 191141488Sjhb * Here we arrange for the address space to be copied or referenced, 19211394Sswallace * allocate a user struct (pcb and kernel stack), then call the 193107849Salfred * machine-dependent layer to fill those in and make the new process 19411394Sswallace * ready to run. The new process is set up so that it returns directly 19511394Sswallace * to user mode to avoid stack copying and relocation problems. 
19611394Sswallace */ 19711394Sswallacevoid 19883366Sjulianvm_forkproc(td, p2, td2, flags) 19983366Sjulian struct thread *td; 20011394Sswallace struct proc *p2; 20111394Sswallace struct thread *td2; 202107849Salfred int flags; 20311394Sswallace{ 20411394Sswallace struct proc *p1 = td->td_proc; 20516193Snate struct user *up; 20611394Sswallace 20711525Sswallace GIANT_REQUIRED; 20811394Sswallace 20941514Sarchie if ((flags & RFPROC) == 0) { 21041514Sarchie /* 21141514Sarchie * Divorce the memory, if it is shared, essentially 21241514Sarchie * this changes shared memory amongst threads, into 21341514Sarchie * COW locally. 21441514Sarchie */ 21591393Srobert if ((flags & RFMEM) == 0) { 21691388Srobert if (p1->p_vmspace->vm_refcnt > 1) { 217229272Sed vmspace_unshare(p1); 21816193Snate } 21916193Snate } 22041514Sarchie cpu_fork(td, p2, td2, flags); 22141514Sarchie return; 22211394Sswallace } 22311525Sswallace 22411525Sswallace if (flags & RFMEM) { 22511525Sswallace p2->p_vmspace = p1->p_vmspace; 226107849Salfred p1->p_vmspace->vm_refcnt++; 22711394Sswallace } 22811394Sswallace 22911394Sswallace while (vm_page_count_severe()) { 23011394Sswallace VM_WAIT; 23111394Sswallace } 23211394Sswallace 23311394Sswallace if ((flags & RFMEM) == 0) { 23411394Sswallace p2->p_vmspace = vmspace_fork(p1->p_vmspace); 23511394Sswallace 23611394Sswallace pmap_pinit2(vmspace_pmap(p2->p_vmspace)); 23711394Sswallace 23811394Sswallace if (p1->p_vmspace->vm_shm) 239 shmfork(p1, p2); 240 } 241 242 pmap_new_proc(p2); 243 pmap_new_thread(td2); /* Initial thread */ 244 245 /* XXXKSE this is unsatisfactory but should be adequate */ 246 up = p2->p_uarea; 247 248 /* 249 * p_stats currently points at fields in the user struct 250 * but not at &u, instead at p_addr. Copy parts of 251 * p_stats; zero the rest of p_stats (statistics). 252 * 253 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we dont' need 254 * to share sigacts, so we use the up->u_sigacts. 
255 */ 256 p2->p_stats = &up->u_stats; 257 if (p2->p_sigacts == NULL) { 258 if (p2->p_procsig->ps_refcnt != 1) 259 printf ("PID:%d NULL sigacts with refcnt not 1!\n",p2->p_pid); 260 p2->p_sigacts = &up->u_sigacts; 261 up->u_sigacts = *p1->p_sigacts; 262 } 263 264 bzero(&up->u_stats.pstat_startzero, 265 (unsigned) ((caddr_t) &up->u_stats.pstat_endzero - 266 (caddr_t) &up->u_stats.pstat_startzero)); 267 bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy, 268 ((caddr_t) &up->u_stats.pstat_endcopy - 269 (caddr_t) &up->u_stats.pstat_startcopy)); 270 271 272 /* 273 * cpu_fork will copy and update the pcb, set up the kernel stack, 274 * and make the child ready to run. 275 */ 276 cpu_fork(td, p2, td2, flags); 277} 278 279/* 280 * Called after process has been wait(2)'ed apon and is being reaped. 281 * The idea is to reclaim resources that we could not reclaim while 282 * the process was still executing. 283 */ 284void 285vm_waitproc(p) 286 struct proc *p; 287{ 288 struct thread *td; 289 290 GIANT_REQUIRED; 291 cpu_wait(p); 292 pmap_dispose_proc(p); /* drop per-process resources */ 293 FOREACH_THREAD_IN_PROC(p, td) 294 pmap_dispose_thread(td); 295 vmspace_exitfree(p); /* and clean-out the vmspace */ 296} 297 298/* 299 * Set default limits for VM system. 300 * Called for proc 0, and then inherited by all others. 301 * 302 * XXX should probably act directly on proc0. 303 */ 304static void 305vm_init_limits(udata) 306 void *udata; 307{ 308 struct proc *p = udata; 309 int rss_limit; 310 311 /* 312 * Set up the initial limits on process VM. Set the maximum resident 313 * set size to be half of (reasonably) available memory. Since this 314 * is a soft limit, it comes into effect only when the system is out 315 * of memory - half of main memory helps to favor smaller processes, 316 * and reduces thrashing of the object cache. 
317 */ 318 p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz; 319 p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz; 320 p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz; 321 p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz; 322 /* limit the limit to no less than 2MB */ 323 rss_limit = max(cnt.v_free_count, 512); 324 p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit); 325 p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY; 326} 327 328void 329faultin(p) 330 struct proc *p; 331{ 332 struct thread *td; 333 GIANT_REQUIRED; 334 335 PROC_LOCK_ASSERT(p, MA_OWNED); 336 mtx_lock_spin(&sched_lock); 337 if ((p->p_sflag & PS_INMEM) == 0) { 338 ++p->p_lock; 339 mtx_unlock_spin(&sched_lock); 340 PROC_UNLOCK(p); 341 342 pmap_swapin_proc(p); 343 FOREACH_THREAD_IN_PROC (p, td) 344 pmap_swapin_thread(td); 345 346 PROC_LOCK(p); 347 mtx_lock_spin(&sched_lock); 348 FOREACH_THREAD_IN_PROC (p, td) 349 if (td->td_proc->p_stat == SRUN) /* XXXKSE */ 350 setrunqueue(td); 351 352 p->p_sflag |= PS_INMEM; 353 354 /* undo the effect of setting SLOCK above */ 355 --p->p_lock; 356 } 357 mtx_unlock_spin(&sched_lock); 358} 359 360/* 361 * This swapin algorithm attempts to swap-in processes only if there 362 * is enough space for them. Of course, if a process waits for a long 363 * time, it will be swapped in anyway. 364 * 365 * XXXKSE - KSEGRP with highest priority counts.. 366 * 367 * Giant is still held at this point, to be released in tsleep. 
368 */ 369/* ARGSUSED*/ 370static void 371scheduler(dummy) 372 void *dummy; 373{ 374 struct proc *p; 375 int pri; 376 struct proc *pp; 377 int ppri; 378 379 mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED); 380 /* GIANT_REQUIRED */ 381 382loop: 383 if (vm_page_count_min()) { 384 VM_WAIT; 385 goto loop; 386 } 387 388 pp = NULL; 389 ppri = INT_MIN; 390 sx_slock(&allproc_lock); 391 FOREACH_PROC_IN_SYSTEM(p) { 392 struct ksegrp *kg; 393 mtx_lock_spin(&sched_lock); 394 if (p->p_stat == SRUN 395 && (p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) { 396 /* Find the minimum sleeptime for the process */ 397 FOREACH_KSEGRP_IN_PROC(p, kg) { 398 pri = p->p_swtime + kg->kg_slptime; 399 if ((p->p_sflag & PS_SWAPINREQ) == 0) { 400 pri -= kg->kg_nice * 8; 401 } 402 403 /* 404 * if this ksegrp is higher priority 405 * and there is enough space, then select 406 * this process instead of the previous 407 * selection. 408 */ 409 if (pri > ppri) { 410 pp = p; 411 ppri = pri; 412 } 413 } 414 } 415 mtx_unlock_spin(&sched_lock); 416 } 417 sx_sunlock(&allproc_lock); 418 419 /* 420 * Nothing to do, back to sleep. 421 */ 422 if ((p = pp) == NULL) { 423 tsleep(&proc0, PVM, "sched", maxslp * hz / 2); 424 goto loop; 425 } 426 mtx_lock_spin(&sched_lock); 427 p->p_sflag &= ~PS_SWAPINREQ; 428 mtx_unlock_spin(&sched_lock); 429 430 /* 431 * We would like to bring someone in. (only if there is space). 432 */ 433 PROC_LOCK(p); 434 faultin(p); 435 PROC_UNLOCK(p); 436 mtx_lock_spin(&sched_lock); 437 p->p_swtime = 0; 438 mtx_unlock_spin(&sched_lock); 439 goto loop; 440} 441 442#ifndef NO_SWAPPING 443 444/* 445 * Swap_idle_threshold1 is the guaranteed swapped in time for a process 446 */ 447static int swap_idle_threshold1 = 2; 448SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, 449 CTLFLAG_RW, &swap_idle_threshold1, 0, ""); 450 451/* 452 * Swap_idle_threshold2 is the time that a process can be idle before 453 * it will be swapped out, if idle swapping is enabled. 
454 */ 455static int swap_idle_threshold2 = 10; 456SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, 457 CTLFLAG_RW, &swap_idle_threshold2, 0, ""); 458 459/* 460 * Swapout is driven by the pageout daemon. Very simple, we find eligible 461 * procs and unwire their u-areas. We try to always "swap" at least one 462 * process in case we need the room for a swapin. 463 * If any procs have been sleeping/stopped for at least maxslp seconds, 464 * they are swapped. Else, we swap the longest-sleeping or stopped process, 465 * if any, otherwise the longest-resident process. 466 */ 467void 468swapout_procs(action) 469int action; 470{ 471 struct proc *p; 472 struct ksegrp *kg; 473 struct proc *outp, *outp2; 474 int outpri, outpri2; 475 int didswap = 0; 476 477 GIANT_REQUIRED; 478 479 outp = outp2 = NULL; 480 outpri = outpri2 = INT_MIN; 481retry: 482 sx_slock(&allproc_lock); 483 LIST_FOREACH(p, &allproc, p_list) { 484 struct vmspace *vm; 485 int minslptime = 100000; 486 487 PROC_LOCK(p); 488 if (p->p_lock != 0 || 489 (p->p_flag & (P_TRACED|P_SYSTEM|P_WEXIT)) != 0) { 490 PROC_UNLOCK(p); 491 continue; 492 } 493 /* 494 * only aiod changes vmspace, however it will be 495 * skipped because of the if statement above checking 496 * for P_SYSTEM 497 */ 498 vm = p->p_vmspace; 499 mtx_lock_spin(&sched_lock); 500 if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) { 501 mtx_unlock_spin(&sched_lock); 502 PROC_UNLOCK(p); 503 continue; 504 } 505 506 switch (p->p_stat) { 507 default: 508 mtx_unlock_spin(&sched_lock); 509 PROC_UNLOCK(p); 510 continue; 511 512 case SSLEEP: 513 case SSTOP: 514 /* 515 * do not swapout a realtime process 516 * Check all the thread groups.. 517 */ 518 FOREACH_KSEGRP_IN_PROC(p, kg) { 519 if (PRI_IS_REALTIME(kg->kg_pri_class)) { 520 mtx_unlock_spin(&sched_lock); 521 PROC_UNLOCK(p); 522 goto nextproc; 523 } 524 525 /* 526 * Do not swapout a process waiting 527 * on a critical event of some kind. 528 * Also guarantee swap_idle_threshold1 529 * time in memory. 
530 */ 531 if (((FIRST_THREAD_IN_PROC(p)->td_priority) < PSOCK) || 532 (kg->kg_slptime < swap_idle_threshold1)) { 533 mtx_unlock_spin(&sched_lock); 534 PROC_UNLOCK(p); 535 goto nextproc; 536 } 537 538 /* 539 * If the system is under memory stress, 540 * or if we are swapping 541 * idle processes >= swap_idle_threshold2, 542 * then swap the process out. 543 */ 544 if (((action & VM_SWAP_NORMAL) == 0) && 545 (((action & VM_SWAP_IDLE) == 0) || 546 (kg->kg_slptime < swap_idle_threshold2))) { 547 mtx_unlock_spin(&sched_lock); 548 PROC_UNLOCK(p); 549 goto nextproc; 550 } 551 if (minslptime > kg->kg_slptime) 552 minslptime = kg->kg_slptime; 553 } 554 555 mtx_unlock_spin(&sched_lock); 556 ++vm->vm_refcnt; 557 /* 558 * do not swapout a process that 559 * is waiting for VM 560 * data structures there is a 561 * possible deadlock. 562 */ 563 if (vm_map_try_lock(&vm->vm_map)) { 564 vmspace_free(vm); 565 PROC_UNLOCK(p); 566 goto nextproc; 567 } 568 vm_map_unlock(&vm->vm_map); 569 /* 570 * If the process has been asleep for awhile and had 571 * most of its pages taken away already, swap it out. 572 */ 573 if ((action & VM_SWAP_NORMAL) || 574 ((action & VM_SWAP_IDLE) && 575 (minslptime > swap_idle_threshold2))) { 576 sx_sunlock(&allproc_lock); 577 swapout(p); 578 vmspace_free(vm); 579 didswap++; 580 goto retry; 581 } 582 PROC_UNLOCK(p); 583 vmspace_free(vm); 584 } 585nextproc: 586 } 587 sx_sunlock(&allproc_lock); 588 /* 589 * If we swapped something out, and another process needed memory, 590 * then wakeup the sched process. 
591 */ 592 if (didswap) 593 wakeup(&proc0); 594} 595 596static void 597swapout(p) 598 struct proc *p; 599{ 600 struct thread *td; 601 602 PROC_LOCK_ASSERT(p, MA_OWNED); 603#if defined(SWAP_DEBUG) 604 printf("swapping out %d\n", p->p_pid); 605#endif 606 ++p->p_stats->p_ru.ru_nswap; 607 /* 608 * remember the process resident count 609 */ 610 p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace); 611 612 mtx_lock_spin(&sched_lock); 613 p->p_sflag &= ~PS_INMEM; 614 p->p_sflag |= PS_SWAPPING; 615 PROC_UNLOCK(p); 616 FOREACH_THREAD_IN_PROC (p, td) 617 if (td->td_proc->p_stat == SRUN) /* XXXKSE */ 618 remrunqueue(td); /* XXXKSE */ 619 mtx_unlock_spin(&sched_lock); 620 621 pmap_swapout_proc(p); 622 FOREACH_THREAD_IN_PROC(p, td) 623 pmap_swapout_thread(td); 624 625 mtx_lock_spin(&sched_lock); 626 p->p_sflag &= ~PS_SWAPPING; 627 p->p_swtime = 0; 628 mtx_unlock_spin(&sched_lock); 629} 630#endif /* !NO_SWAPPING */ 631