vm_glue.c revision 105126
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 105126 2002-10-14 20:31:54Z julian $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	return (rv == TRUE);
}
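
/*
 * A note on the rounding in kernacc() and useracc() above: trunc_page()
 * rounds an address down to a page boundary and round_page() rounds up,
 * so the checked range covers every page the request touches.  Assuming
 * a 4K page size, for example, addr = 0x10f0 with len = 0x20 gives
 * saddr = 0x1000 and eaddr = 0x2000, i.e. exactly one page.  The extra
 * test in useracc(), (vm_offset_t)addr + len < (vm_offset_t)addr,
 * rejects requests whose end address wraps around.
 */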

/*
 * MPSAFE
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}
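
/*
 * Taken together, the routines above and below give the U area its life
 * cycle: vm_proc_new() allocates, wires and maps the pages at fork time,
 * vm_proc_dispose() releases them when the process is reaped, and
 * (unless NO_SWAPPING is defined) vm_proc_swapout() and vm_proc_swapin()
 * let the swapout code evict the pages under memory pressure and bring
 * them back before the process runs again.
 */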

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}
#endif
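
/*
 * A summary of how vm_forkproc() below treats its rfork(2)-style flags,
 * as implemented by the code:
 *
 *	RFPROC clear:	no new process; if RFMEM is also clear, a
 *			shared vmspace is un-shared (made COW locally).
 *	RFMEM set:	the child shares the parent's vmspace by
 *			reference.
 *	RFMEM clear:	the child gets a copy-on-write copy of the
 *			parent's vmspace via vmspace_fork().
 */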

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't
	 * need to share sigacts, so we use the up->u_sigacts.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf ("PID:%d NULL sigacts with refcnt not 1!\n",p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
	    (caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
	    (caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	cpu_wait(p);
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
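
/*
 * Bring a swapped-out process back into memory.  As the asserts below
 * show, the caller must hold the proc lock and sched_lock; if another
 * process is already swapping this one in, we merely sleep until that
 * swap-in completes.
 */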
void
faultin(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
#ifdef NO_SWAPPING
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else
	if ((p->p_sflag & PS_INMEM) == 0) {
		struct thread *td;

		++p->p_lock;
		/*
		 * If another process is swapping in this process,
		 * just wait until it finishes.
		 */
		if (p->p_sflag & PS_SWAPPINGIN) {
			mtx_unlock_spin(&sched_lock);
			msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
			mtx_lock_spin(&sched_lock);
			--p->p_lock;
			return;
		}

		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC (p, td) {
			pmap_swapin_thread(td);
			TD_CLR_SWAPPED(td);
		}

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC (p, td)
			if (TD_CAN_RUN(td))
				setrunnable(td);

		wakeup(&p->p_sflag);

		/* undo the effect of the p_lock hold above */
		--p->p_lock;
	}
#endif
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPING | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		goto loop;
	}

	p->p_sflag &= ~PS_SWAPINREQ;

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}
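
/*
 * To make the selection in scheduler() above concrete: the score is
 * pri = p_swtime + kg_slptime, less kg_nice * 8 when no explicit swapin
 * was requested, and the highest score wins.  With made-up numbers, a
 * process with p_swtime 20 and kg_slptime 10 at nice 0 scores 30 and is
 * chosen over one with p_swtime 5, kg_slptime 5 and nice 4, which
 * scores 5 + 5 - 32 = -22.
 */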

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Do not swapout a process that is waiting for VM data
		 * structures; there is a possible deadlock.  Test this
		 * first as this may block.
		 *
		 * Lock the map until swapout finishes, or a thread of
		 * this process may attempt to alter the map.
		 *
		 * Watch out for a process in creation.  It may have no
		 * address space yet.
		 *
		 * An aio daemon switches its address space while running.
		 * Perform a quick check whether a process has P_SYSTEM.
		 */
		PROC_LOCK(p);
		if ((p->p_flag & P_SYSTEM) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;
		}
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
			("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			goto nextproc;

		case PRS_NORMAL:
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;

				/*
				 * swapout() unlocks a proc lock. This is
				 * ugly, but avoids superfluous lock.
				 */
				mtx_unlock_spin(&sched_lock);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		mtx_unlock_spin(&sched_lock);
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}
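
/*
 * Unwire a process's U area and mark its threads as swapped out.
 * Called with both the proc lock and sched_lock held; note that the
 * proc lock is dropped in here (see the comment at the swapout() call
 * in swapout_procs() above).
 */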
static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) == PS_INMEM,
		("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
			("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	PROC_UNLOCK(p);
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		pmap_swapout_thread(td);
		TD_SET_SWAPPED(td);
	}
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */