/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.84 1999/02/19 14:25:36 luoqi Exp $
 */

#include "opt_rlimit.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)
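/*
 * The SYSINIT above registers vm_init_limits() to run at VM
 * configuration time (SI_SUB_VM_CONF) with &proc0 as its argument;
 * proc0 picks up the default limits and every later process inherits
 * them across fork.
 */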

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)


static void swapout __P((struct proc *));

extern char kstack[];

/* vm_map_t upages_map; */

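/*
 * kernacc:
 *
 * Returns 1 if the kernel may access the range [addr, addr + len) in
 * kernel_map with the protection implied by rw (B_READ or B_WRITE),
 * 0 otherwise.  A typical caller pattern looks roughly like the
 * following (illustrative sketch only, not taken from this file):
 *
 *	if (!kernacc(ptr, size, B_READ))
 *		return (EFAULT);
 */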
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

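/*
 * useracc:
 *
 * Returns 1 if the current process may access the user-space range
 * [addr, addr + len) with the protection implied by rw, 0 otherwise.
 * Typical use (again only a sketch) is validating a user buffer
 * before touching it:
 *
 *	if (!useracc(uaddr, len, B_WRITE))
 *		return (EFAULT);
 */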
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
	vm_map_t map;
	vm_map_entry_t save_hint;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}

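/*
 * vslock:
 *
 * Wire the user pages covering [addr, addr + len) in the current
 * process's map so they cannot be paged out (for example, around a
 * copy that must not fault).  Undone by vsunlock().
 */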
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

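/*
 * vsunlock:
 *
 * Unwire the user pages covering [addr, addr + len) that were wired
 * by vslock().  The "dirtied" argument is kept for symmetry with the
 * historical interface but is unused here, hence the lint workaround.
 */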
void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	register struct user *up;

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	/*
	 * Great, so we have a memory-heavy process and the
	 * entire machine comes to a screeching halt because
	 * nobody can fork/exec anything.  What we really need
	 * to do is fix the process swapper so it swaps out the right
	 * processes.
	 */
#if 0
	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		vm_pageout_deficit += (UPAGES + VM_INITIAL_PAGEIN);
		VM_WAIT;
	}
#endif

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_new_proc(p2);

	up = p2->p_addr;

	/*
	 * p_stats currently points at fields in the user struct, reached
	 * through p_addr rather than through &u.  Copy parts of p_stats;
	 * zero the rest of p_stats (the statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't
	 * need to share sigacts, so we use the up->u_sigacts.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n", p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));


	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(p1, p2);
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB */
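	/* (512 pages is 2MB with the common 4K page size; ptoa() converts pages to bytes) */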
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

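/*
 * faultin:
 *
 * Bring a swapped-out process back into memory: hold it via p_lock,
 * swap its U-pages back in with pmap_swapin_proc(), put it on the run
 * queue if it is runnable, and mark it P_INMEM again.
 */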
void
faultin(p)
	struct proc *p;
{
	int s;

	if ((p->p_flag & P_INMEM) == 0) {

		++p->p_lock;

		pmap_swapin_proc(p);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* undo the p_lock hold taken above */
		--p->p_lock;
		splx(s);

	}
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN &&
			(p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {

			pri = p->p_swtime + p->p_slptime;
			if ((p->p_flag & P_SWAPINREQ) == 0) {
				pri -= p->p_nice * 8;
			}

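			/*
			 * The candidate's priority grows with how long it has
			 * been swapped out plus how long it has been asleep
			 * (e.g. 20 seconds swapped out and 10 asleep gives 30);
			 * unless a swapin was explicitly requested, the nice
			 * value scaled by 8 is subtracted, so nice'd-down
			 * processes are brought back in later.
			 */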
			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}
	p->p_flag &= ~P_SWAPINREQ;

	/*
	 * We would like to bring someone in. (only if there is space).
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

#ifndef NO_SWAPPING

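/*
 * A process is considered swappable only if its U-area is not held
 * (p_lock == 0) and, of the flags tested below, exactly P_INMEM is
 * set: it must be resident and must not be traced, a system process,
 * exiting, or already being swapped.
 */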
#define	swappable(p) \
	(((p)->p_lock == 0) && \
		((p)->p_flag & (P_TRACED|P_SYSTEM|P_INMEM|P_WEXIT|P_SWAPPING)) == P_INMEM)


/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		struct vmspace *vm;
		if (!swappable(p))
			continue;

		vm = p->p_vmspace;

		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (RTP_PRIO_IS_REALTIME(p->p_rtprio.type))
				continue;

			/*
			 * Do not swapout a process waiting on a critical
			 * event of some kind.  Also guarantee swap_idle_threshold1
			 * time in memory.
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
				(p->p_slptime < swap_idle_threshold1))
				continue;

			/*
			 * If the system is under memory stress, or if we are swapping
			 * idle processes >= swap_idle_threshold2, then swap the process
			 * out.
			 */
			if (((action & VM_SWAP_NORMAL) == 0) &&
				(((action & VM_SWAP_IDLE) == 0) ||
				  (p->p_slptime < swap_idle_threshold2)))
				continue;

			++vm->vm_refcnt;
			/*
			 * Do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (lockmgr(&vm->vm_map.lock,
					LK_EXCLUSIVE | LK_NOWAIT,
					(void *)0, curproc)) {
				vmspace_free(vm);
				continue;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and has
			 * already had most of its pages taken away, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (p->p_slptime > swap_idle_threshold2))) {
				swapout(p);
				vmspace_free(vm);
				didswap++;
				goto retry;
			}
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

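/*
 * swapout:
 *
 * Mark a process as swapped out and release its U-pages through
 * pmap_swapout_proc().  The process is pulled off the run queue if
 * necessary; faultin() later reverses the operation.
 */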
static void
swapout(p)
	register struct proc *p;
{

#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	pmap_swapout_proc(p);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */