vm_glue.c revision 104094
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 104094 2002-09-28 17:15:38Z phk $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}
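
/*
 * Usage sketch (illustrative only, not compiled): a caller that wants
 * to verify a kernel buffer is readable and writable before touching
 * it might do something like the following; "buf" and "len" here are
 * hypothetical.
 */
#if 0
	if (!kernacc((caddr_t)buf, len, VM_PROT_READ | VM_PROT_WRITE))
		return (EFAULT);	/* some page in the range lacks access */
#endif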

/*
 * MPSAFE
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}
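
/*
 * Usage sketch (illustrative only, not compiled): vslock()/vsunlock()
 * are meant to bracket access to user memory that must stay resident,
 * e.g. while data is copied out to it.  "uaddr", "ulen", and "kbuf"
 * are hypothetical.
 */
#if 0
	vslock(uaddr, ulen);			/* wire the user pages */
	error = copyout(kbuf, uaddr, ulen);
	vsunlock(uaddr, ulen);			/* unwire them again */
#endif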

/*
 * Create the U area for a new process.
 * This routine directly affects the fork performance of a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit performance of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}
#endif

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct,
	 * not at &u; it is reached through p_addr.  Copy parts of
	 * p_stats and zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't
	 * need to share sigacts, so we use up->u_sigacts instead.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n",
			    p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after a process has been wait(2)ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	cpu_wait(p);
	vmspace_exitfree(p);		/* and clean out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be half of (reasonably) available memory.
	 * Since this is a soft limit, it comes into effect only when the
	 * system is out of memory - half of main memory helps to favor
	 * smaller processes, and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
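
/*
 * Worked example of the floor above, assuming the common 4K page size:
 * rss_limit is a page count, so 512 pages -> ptoa(512) = 512 * 4096
 * bytes = 2MB, which is where the "no less than 2MB" comment comes from.
 */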

void
faultin(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
#ifdef NO_SWAPPING
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else
	if ((p->p_sflag & PS_INMEM) == 0) {
		struct thread *td;

		++p->p_lock;		/* hold the process */
		/*
		 * If another process is swapping in this process,
		 * just wait until it finishes.
		 */
		if (p->p_sflag & PS_SWAPPINGIN) {
			mtx_unlock_spin(&sched_lock);
			msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
			mtx_lock_spin(&sched_lock);
			--p->p_lock;
			return;
		}

		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			pmap_swapin_thread(td);
			TD_CLR_SWAPPED(td);
		}

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td)
			if (TD_CAN_RUN(td))
				setrunnable(td);

		wakeup(&p->p_sflag);

		/* release the hold on p_lock acquired above */
		--p->p_lock;
	}
#endif
}
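
/*
 * Caller sketch (illustrative only, not compiled): faultin() expects
 * both the proc lock and sched_lock to be held, as asserted above, so
 * a hypothetical caller looks like this (cf. scheduler() below).
 */
#if 0
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	faultin(p);
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
#endif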

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the highest-priority thread of a process is what counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPING | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * If this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be in the middle of being
	 * swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		goto loop;
	}

	p->p_sflag &= ~PS_SWAPINREQ;

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");
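
/*
 * Tuning sketch (illustrative only): both knobs above are read-write
 * sysctls, so an administrator could lengthen the in-memory guarantee
 * with, e.g.:
 *
 *	sysctl vm.swap_idle_threshold1=5
 */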

/*
 * Swapout is driven by the pageout daemon.  Very simply, we find
 * eligible procs and unwire their u-areas.  We try to always "swap"
 * at least one process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped
 * process, if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Do not swap out a process that is waiting for VM
		 * data structures; there is a possible deadlock.
		 * Test this first, as it may block.
		 *
		 * Lock the map until swapout finishes, or a thread of
		 * this process may attempt to alter the map.
		 *
		 * Watch out for a process in creation.  It may have no
		 * address space yet.
		 *
		 * An aio daemon switches its address space while
		 * running, so perform a quick check for P_SYSTEM.
		 */
		PROC_LOCK(p);
		if ((p->p_flag & P_SYSTEM) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;
		}
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
			("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * Only aiod changes the vmspace; however, it will be
		 * skipped because of the P_SYSTEM check above.
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort of
			 * 'special' state.
			 */
			goto nextproc;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups.
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swap out a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for a while and
			 * had most of its pages taken away already, swap
			 * it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;

				/*
				 * swapout() unlocks a proc lock.  This
				 * is ugly, but avoids superfluous lock
				 * operations.
				 */
				mtx_unlock_spin(&sched_lock);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		mtx_unlock_spin(&sched_lock);
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) == PS_INMEM,
		("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
			("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)	/* shouldn't be possible, but... */
		if (TD_ON_RUNQ(td)) {	/* XXXKSE */
			panic("swapping out runnable process");
			remrunqueue(td);	/* XXXKSE */
		}
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		pmap_swapout_thread(td);
		TD_SET_SWAPPED(td);
	}
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */