/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 100884 2002-07-29 18:33:32Z julian $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
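 *
 * Check whether the kernel map permits the requested access (a mask of
 * VM_PROT_* bits in ``rw'') over the byte range [addr, addr + len).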
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
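 *
 * Check whether the current process's map permits the requested access
 * over the byte range [addr, addr + len); ranges that extend beyond
 * VM_MAXUSER_ADDRESS or wrap around are rejected outright.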
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
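 *
 * Wire down the pages backing the byte range [addr, addr + len) in the
 * current process so that they cannot be paged out.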
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
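 *
 * Unwire the pages wired by an earlier vslock().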
 */
void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}
#endif

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory if it is shared; essentially this
		 * turns memory shared amongst threads into local
		 * copy-on-write.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

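	/*
	 * RFMEM: the child shares the parent's address space; just take
	 * another reference instead of copying it.
	 */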
	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct,
	 * reached via p_addr rather than &u.  Copy parts of p_stats
	 * and zero the rest (the statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL, we don't
	 * need to share sigacts, so we use up->u_sigacts instead.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n",
			    p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

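	/*
	 * The pstat_startzero/pstat_endzero and pstat_startcopy/
	 * pstat_endcopy markers in struct pstats delimit the fields
	 * zeroed in the child and those inherited from the parent,
	 * respectively.
	 */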
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{
	struct thread *td;

	GIANT_REQUIRED;
	cpu_wait(p);
/* XXXKSE by here there should not be any threads left! */
	FOREACH_THREAD_IN_PROC(p, td) {
		panic("vm_waitproc: Survivor thread!");
	}
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* Floor the limit at 512 pages, i.e. 2MB with 4KB pages. */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

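/*
 * Bring a swapped-out process back into memory.  The caller must hold
 * both the proc lock and sched_lock on entry.
 */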
void
faultin(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
#ifdef NO_SWAPPING
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else
	if ((p->p_sflag & PS_INMEM) == 0) {
		struct thread *td;

		++p->p_lock;
		/*
		 * If another process is swapping in this process,
		 * just wait until it finishes.
		 */
		if (p->p_sflag & PS_SWAPPINGIN) {
			mtx_unlock_spin(&sched_lock);
			msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
			mtx_lock_spin(&sched_lock);
			--p->p_lock;
			return;
		}

		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC (p, td)
			pmap_swapin_thread(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC (p, td)
			if (td->td_state == TDS_SWAPPED)	/* XXXKSE */
				setrunqueue(td);

		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		wakeup(&p->p_sflag);

		/* Undo the hold on p_lock acquired above. */
		--p->p_lock;
	}
#endif
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 *  XXXKSE - the process with the highest-priority thread counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPING | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * A runnable thread of a process swapped out is in
			 * TDS_SWAPPED.
			 */
			if (td->td_state == TDS_SWAPPED) {
				kg = td->td_ksegrp;
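				/*
				 * The swapin priority rewards processes
				 * that have been swapped out longest
				 * (p_swtime) and slept longest
				 * (kg_slptime); unless a swapin was
				 * explicitly requested, a lower nice
				 * value raises the priority further.
				 */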
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		goto loop;
	}

	p->p_sflag &= ~PS_SWAPINREQ;

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find
 * eligible procs and unwire their u-areas.  We try to always "swap"
 * at least one process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped
 * process, if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SNGL|P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Only aiod changes the vmspace; however, it will be
		 * skipped because of the P_SYSTEM check in the if
		 * statement above.
		 */
		vm = p->p_vmspace;
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;
		}

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort of
			 * 'special' state.
			 */
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class)) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}

				/*
				 * Do not swapout a process waiting
				 * on a critical event of some kind.
				 * Also guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}
				/*
				 * Do not swapout a process if there is
				 * a thread whose pageable memory may
				 * be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_PROC(p, td) {
					if ((td->td_priority) < PSOCK ||
					    !(td->td_state == TDS_SLP ||
					     td->td_state == TDS_RUNQ)) {
						mtx_unlock_spin(&sched_lock);
						PROC_UNLOCK(p);
						goto nextproc;
					}
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2))) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}
				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			mtx_unlock_spin(&sched_lock);
			++vm->vm_refcnt;
			/*
			 * Do not swapout a process that is waiting for
			 * VM data structures; there is a possible
			 * deadlock.  The trylock below only probes for
			 * such use of the map.
			 */
			if (!vm_map_trylock(&vm->vm_map)) {
				vmspace_free(vm);
				PROC_UNLOCK(p);
				goto nextproc;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and
			 * had most of its pages taken away already, swap
			 * it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				sx_sunlock(&allproc_lock);
				swapout(p);
				vmspace_free(vm);
				didswap++;
				goto retry;
			}
			PROC_UNLOCK(p);
			vmspace_free(vm);
		}
nextproc:
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	mtx_lock_spin(&sched_lock);

	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (!(td->td_state == TDS_SLP ||
		     td->td_state == TDS_RUNQ)) {
			mtx_unlock_spin(&sched_lock);
			return;
		}
	}

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC (p, td)
		if (td->td_state == TDS_RUNQ) {	/* XXXKSE */
			remrunqueue(td);	/* XXXKSE */
			td->td_state = TDS_SWAPPED;
		}
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */