vm_glue.c revision 99985
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 99985 2002-07-14 19:36:15Z alc $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
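	/*
	 * Expand the range to page boundaries; protection is tracked
	 * with page granularity.
	 */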
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
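	/* Also reject ranges whose end wraps around the address space. */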
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{

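	/*
	 * The FALSE argument requests a system wiring, as opposed to a
	 * user (mlock-style) wiring.
	 */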
	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		ma[i] = m;

		/*
		 * Wire the page.
		 */
		m->wire_count++;
		cnt.v_wire_count++;

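		/*
		 * Clear the busy state set by vm_page_grab() and mark
		 * the page mapped, writeable, and fully valid.
		 */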
		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
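			/* The page contents are stale; fetch them from the pager. */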
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}
#endif

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this turns memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

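	/*
	 * RFMEM: share the parent's address space by taking another
	 * reference on its vmspace.
	 */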
	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct,
	 * not at &u; it points at p_addr.  Copy parts of
	 * p_stats and zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't need
	 * to share sigacts, so we use up->u_sigacts.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n", p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{
	struct thread *td;

	GIANT_REQUIRED;
	cpu_wait(p);
	/* XXXKSE by here there should not be any threads left! */
	FOREACH_THREAD_IN_PROC(p, td) {
		panic("vm_waitproc: Survivor thread!");
	}
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* Do not let the RSS limit fall below 512 pages (2MB). */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
#ifdef NO_SWAPPING
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else
	if ((p->p_sflag & PS_INMEM) == 0) {
		struct thread *td;

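		/*
		 * Bump the hold count (p_lock) so that swapout_procs()
		 * leaves the process alone while it is brought back in.
		 */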
		++p->p_lock;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC (p, td)
			pmap_swapin_thread(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC (p, td)
			if (td->td_state == TDS_RUNQ)	/* XXXKSE */
				setrunqueue(td);

		p->p_sflag |= PS_INMEM;

		/* undo the hold count bump above */
		--p->p_lock;
	}
#endif
	mtx_unlock_spin(&sched_lock);
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPING)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/* Only consider runnable threads */
			if (td->td_state == TDS_RUNQ) {
				kg = td->td_ksegrp;
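				/*
				 * Favor the process that has been swapped
				 * out or asleep the longest; nice values
				 * bias the choice unless a swapin has been
				 * explicitly requested.
				 */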
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * If this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	PROC_LOCK(p);
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SNGL|P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		vm = p->p_vmspace;
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;
		}

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups.
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class)) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}

				/*
				 * Do not swap out a process waiting
				 * on a critical event of some kind.
				 * Also guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}
				FOREACH_THREAD_IN_PROC(p, td) {
					if ((td->td_priority) < PSOCK) {
						mtx_unlock_spin(&sched_lock);
						PROC_UNLOCK(p);
						goto nextproc;
					}
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2))) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}
				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			mtx_unlock_spin(&sched_lock);
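			/*
			 * Take a reference on the vmspace so that it is
			 * not freed out from under us.
			 */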
			++vm->vm_refcnt;
			/*
			 * Do not swap out a process that is waiting
			 * for VM data structures; there is a
			 * possible deadlock.
			 */
			if (!vm_map_trylock(&vm->vm_map)) {
				vmspace_free(vm);
				PROC_UNLOCK(p);
				goto nextproc;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				sx_sunlock(&allproc_lock);
				swapout(p);
				vmspace_free(vm);
				didswap++;
				goto retry;
			}
			PROC_UNLOCK(p);
			vmspace_free(vm);
		}
nextproc:
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC (p, td)
		if (td->td_state == TDS_RUNQ)	/* XXXKSE */
			remrunqueue(td);	/* XXXKSE */
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */
