vm_pageout.c revision 45960
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.140 1999/04/06 03:14:56 peter Exp $
 */

/*
 *	The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct	proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

static int max_page_launder=100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
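
/*
 * All of the CTLFLAG_RW knobs above may be tuned at run time, e.g.
 * (illustrative only; the values shown are not recommendations):
 *
 *	sysctl -w vm.max_page_launder=128
 *	sysctl -w vm.defer_swapspace_pageouts=1
 */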

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, so until then we cannot do anything that would mess with the page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

#if 0
	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;
#endif

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

#if 0
	/*
	 * XXX REMOVED XXX.  vm_object_collapse() can block, which can
	 * change the page state.  Calling vm_object_collapse() might also
	 * destroy or rename the page because we have not busied it yet!!!
	 * So this code segment is removed.
	 */
	/*
	 * Try collapsing before it's too late.   XXX huh?  Why are we doing
	 * this here?
	 */
	if (object->backing_object) {
		vm_object_collapse(object);
	}
#endif

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
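	/*
	 * A sketch of the layout: mc[] has room for 2 * vm_pageout_page_count
	 * entries, with the passed-in page parked at the center slot.  The
	 * forward scan below fills slots above the center, the backward scan
	 * fills slots below it (moving page_base down), so the final cluster
	 * handed to vm_pageout_flush() is
	 * mc[page_base .. page_base + pageout_count - 1].
	 */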
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
					(p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e. busy the page), mark it read-only, and bump the object's
 *	paging-in-progress count all in here rather than in the parent.  If
 *	we want the parent to do more sophisticated things we may have to
 *	change the ordering.  The return value is the number of pages whose
 *	pageout completed or was successfully started (VM_PAGER_OK or
 *	VM_PAGER_PEND).
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 */

	for (i = 0; i < count; i++) {
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We will
			 * try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
				(p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, although
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
				((bigobj == NULL) ||
				 (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	int launder_loop = 0;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */

	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage_init;
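	/*
	 * Illustrative numbers only: with v_free_target == 1024,
	 * v_cache_min == 512, v_free_count == 300 and v_cache_count == 400,
	 * the shortage computed above is (1024 + 512) - (300 + 400) = 836
	 * pages, before the accumulated deficit is added on top.
	 */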

	/*
	 * Figure out what to do with dirty pages when they are encountered.
	 * Assume that 1/3 of the pages on the inactive list are clean.  If
	 * we think we can reach our target, disable laundering (do not
	 * clean any dirty pages).  If we miss the target we will loop back
	 * up and do a laundering run.
	 */

	if (cnt.v_inactive_count / 3 > page_shortage) {
		maxlaunder = 0;
		launder_loop = 0;
	} else {
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		launder_loop = 1;
	}

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (
	    m = TAILQ_FIRST(&vm_page_queue_inactive);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next
	) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system knows nothing about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place the page back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
					(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
			}
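
			/*
			 * In short: non-anonymous pages may always be
			 * laundered; anonymous (default/swap-backed) pages
			 * may be laundered unless pageouts are deferred or
			 * disabled, except that deferral is overridden once
			 * free + cache memory drops below v_free_min.
			 */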

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * For now we protect against potential memory
			 * deadlocks by requiring significant memory to be
			 * free if the object is not OBJT_DEFAULT or OBJT_SWAP.
			 * We do not 'trust' any other object type to operate
			 * with low memory, not even OBJT_DEVICE.  The VM
			 * allocator will special case allocations done by
			 * the pageout daemon so the check below actually
			 * does have some hysteresis in it.  It isn't the best
			 * solution, though.
			 */

			if (
			    object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP &&
			    cnt.v_free_count < cnt.v_free_reserved
			) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * Presumably we have sufficient free memory to do
			 * the more sophisticated checks and locking required
			 * for vnodes.
			 *
			 * The object is already known NOT to be dead.  The
			 * vget() may still block, though, because
			 * VOP_ISLOCKED() doesn't check to see if an inode
			 * (v_data) is associated with the vnode.  If it isn't,
			 * vget() will load it in from disk.  Worse, vget()
			 * may actually get stuck waiting on "inode" if another
			 * process is in the process of bringing the inode in.
			 * This is bad news for us either way.
			 *
			 * So for the moment we check v_data == NULL as a
			 * workaround.  This means that vnodes which do not
			 * use v_data in the way we expect probably will not
			 * wind up being paged out by the pager and it will be
			 * up to the syncer to get them.  That's better than
			 * us blocking here.
			 *
			 * This whole code section is bogus - we need to fix
			 * the vnode pager to handle vm_page_t's without us
			 * having to do any sophisticated VOP tests.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				if (VOP_ISLOCKED(vp) ||
				    vp->v_data == NULL ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
						(m->hold_count == 0) &&
						(m->busy == 0) &&
						(m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget() above.  We don't move
				 * the page back onto the end of the queue,
				 * which keeps the statistics more correct.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * If we still have a page shortage and we didn't launder anything,
	 * run the inactive scan again and launder something this time.
	 */

	if (launder_loop == 0 && page_shortage > 0) {
		launder_loop = 1;
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		goto rescan0;
	}

	/*
	 * Compute the page shortage from the point of view of having to
	 * move pages from the active queue to the inactive queue.
	 */

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
				(m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both the cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
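		/*
		 * PQ_PRIME2 is chosen relatively prime to the number of
		 * cache queues, so advancing the rover by it and masking
		 * with PQ_L2_MASK walks every page-color queue before the
		 * sequence repeats, spreading the frees across colors.
		 */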
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
		(cnt.v_free_target + cnt.v_cache_min) ) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
			(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * Make sure that we have swap space -- if we are low on both memory
	 * and swap, kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * Skip system processes, the init process, locked
			 * processes, and (while swap remains) low-pid
			 * processes that are likely critical.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace);
			/*
			 * If this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * some statistics accumulation still occurs during long periods in
 * which there is no paging.  This helps the situation when paging just
 * starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m,next;
	int pcount,tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		/*
		 * Reset the counter so the next full scan is a full interval
		 * away; without this, every pass after the first full one
		 * would scan the whole queue.
		 */
		fullintervalcount = 0;
	}
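
	/*
	 * Most passes thus scan only a slice of the active queue, scaled by
	 * vm_pageout_stats_max; roughly every vm_pageout_full_stats_interval
	 * seconds a pass covers the whole queue.
	 */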

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM-wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
		cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
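	/*
	 * Illustrative numbers only (assuming 4K pages and the usual 64K
	 * MAXBSIZE): on a 32768-page machine, v_free_min starts at
	 * 4 + (32768 - 1024) / 200 = 162 pages and v_pageout_free_min is
	 * (2 * 64K) / 4K + v_interrupt_free_min = 32 + 2 = 34 pages, before
	 * v_free_reserved is folded into v_free_min above.
	 */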
	return 1;
}


/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * Scale the remaining thresholds (v_free_target, v_cache_min/max,
	 * v_inactive_target) from the minimums computed above.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;
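
	/*
	 * The resulting thresholds are normally ordered v_free_reserved <
	 * v_free_min < v_free_target <= v_inactive_target, giving the
	 * pageout daemon progressively larger cushions to work against
	 * (the v_free_count / 3 cap can compress this on small machines).
	 */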

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
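	/*
	 * Each pass below either naps for vm_pageout_stats_interval seconds
	 * and, if nothing woke us, runs only the lightweight stats scan; or,
	 * when pages are needed, naps briefly and then runs a full
	 * vm_pageout_scan().
	 */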
	while (TRUE) {
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
			((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
				PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pageout_scan();
		vm_pageout_deficit = 0;
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

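	/*
	 * Wake the vm daemon at most about once per second; the second
	 * comparison restarts the throttle if the ticks counter has wrapped.
	 */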
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes; deactivate pages for any that exceed
		 * their RSS rlimit or that are swapped out.
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or one that is
			 * exiting, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));
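
			/*
			 * The effective limit is the smaller of the soft and
			 * hard RSS limits, converted from bytes to pages by
			 * OFF_TO_IDX().
			 */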

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
	}
}
#endif
