vm_pageout.c revision 202529
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_pageout.c 202529 2010-01-17 21:26:14Z kib $");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif
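
/*
 * This file implements two kernel threads: the pagedaemon, which
 * launders and reclaims pages via vm_pageout_scan() below, and the
 * vmdaemon, which performs whole-process swapout and RSS-limit
 * enforcement in vm_daemon() and is compiled out when NO_SWAPPING
 * is defined.
 */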


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
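
/*
 * The knobs above are runtime-tunable.  For example, an administrator
 * could raise the dirty-flush limit with "sysctl vm.max_launder=64"
 * (an illustrative value, not a recommendation from this file).
 */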

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_FICTITIOUS | PG_MARKER;
	marker.oflags = VPO_BUSY;
	marker.queue = m->queue;
	marker.wire_count = 1;
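	/*
	 * The settings above make the marker inert: PG_MARKER tells queue
	 * scans to skip it, PG_FICTITIOUS and the nonzero wire count keep
	 * it from ever being freed or paged, and VPO_BUSY keeps other
	 * consumers from touching it while the queue lock is dropped.
	 */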

	queue = m->queue;
	object = m->object;

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
			   m, &marker, pageq);
	vm_page_unlock_queues();
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue &&
		     m->object == object &&
		     &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl,
		     &marker, pageq);
	return (unchanged);
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->oflags & VPO_BUSY))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
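	/*
	 * A worked example, assuming every neighboring page is dirty,
	 * inactive, unwired, and unheld: with the default
	 * vm_pageout_page_count of 16 and pindex 37, the reverse scan
	 * collects pages 36 down to 32 and stops at the 16-page alignment
	 * boundary; the forward scan then collects 38 through 47, yielding
	 * the fully aligned cluster 32..47.
	 */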
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if ((p->oflags & VPO_BUSY) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if ((p->oflags & VPO_BUSY) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we set up for the start of
 *	I/O (i.e. busy the page), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_page_unlock_queues();
	vm_object_pip_add(object, count);

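	/*
	 * The pager fills pageout_status[] with one VM_PAGER_* code per
	 * page.  VM_PAGER_PEND means the write was queued; the pager will
	 * unbusy the page and drop its pip reference when it completes.
	 */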
	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    (mt->flags & PG_WRITEABLE) == 0,
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe())
				vm_page_try_to_cache(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE ||
	    first_object->type == OBJT_SG ||
	    first_object->type == OBJT_PHYS)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		if (object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(pmap) <= desired) {
				vm_page_unlock_queues();
				goto unlock_return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->oflags & VPO_BUSY) ||
			    (p->flags & PG_UNMANAGED) ||
			    !pmap_page_exists_quick(pmap, p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
				(p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_page_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			p = next;
		}
		vm_page_unlock_queues();
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();
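	/*
	 * uma_reclaim() returns cached items held by the kernel slab
	 * allocator to the page allocator; running it after the
	 * vm_lowmem handlers lets those handlers release memory into
	 * UMA first.
	 */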

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_FICTITIOUS | PG_MARKER;
	marker.oflags = VPO_BUSY;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);
		object = m->object;

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_page_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (!VM_OBJECT_TRYLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		     m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}
		if (m->busy || (m->oflags & VPO_BUSY)) {
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
			(actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system does not believe that the page
		 * is fully dirty, but it is mapped for write access, then we
		 * consult the pmap to see if the page's dirty status should
		 * be updated.
		 */
		if (m->dirty != VM_PAGE_BITS_ALL &&
		    (m->flags & PG_WRITEABLE) != 0) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.  In principle, we could eliminate just write
			 * access here rather than all access.  In the expected
			 * case, when there are no last instant modifications
			 * to the page, removing all access will be cheaper
			 * overall.
			 */
			if (pmap_is_modified(m))
				vm_page_dirty(m);
			else if (m->dirty == 0)
				pmap_remove_all(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_page_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok, vfslocked = 0;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				VM_OBJECT_UNLOCK(object);
				vm_page_requeue(m);
				continue;
			}

			/*
			 * The following operations may unlock
			 * vm_page_queue_mtx, invalidating the 'next'
			 * pointer.  To prevent an inordinate number
			 * of restarts we use our marker to remember
			 * our place.
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
					   m, &marker, pageq);
			/*
			 * The object is already known NOT to be dead.   It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_page_unlock_queues();
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, pageq) != &marker) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->oflags & VPO_BUSY)) {
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_page_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
unlock_and_continue:
			VM_OBJECT_UNLOCK(object);
			if (mp != NULL) {
				vm_page_unlock_queues();
				if (vp != NULL)
					vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
				     &marker, pageq);
			continue;
		}
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
		cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;
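	/*
	 * That is: move enough pages to refill the inactive queue to
	 * v_inactive_target, plus whatever deficit remains toward the
	 * free + cache target, plus a credit for pages skipped above.
	 */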

	/*
	 * Scan the active queue for things we can deactivate. We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		object = m->object;
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (object->ref_count != 0)) {
			vm_page_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (object->ref_count == 0) {
					pmap_remove_all(m);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_page_requeue(m);
			}
		}
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  Also kick
	 * swapout if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target())
			vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	     (swap_pager_full && vm_paging_target() > 0)))
		vm_pageout_oom(VM_OOM_MEM);
}


void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system or protected process, skip it.
		 * Likewise spare init (pid 1) and, while swap remains,
		 * low-numbered processes that were likely started early
		 * in boot.
		 */
		if ((p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * get the process size
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * If this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&cnt.v_free_count);
	}
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods without paging, some statistics accumulation
 * still occurs.  This helps the situation where paging is just
 * starting to occur.
 */
static void
vm_pageout_page_stats(void)
{
	vm_object_t object;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
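	/*
	 * Between full scans, examine only a slice of the active queue,
	 * scaled by vm_pageout_stats_max and by the fraction of all
	 * pages that are currently active.
	 */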
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
		    cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
		    ("vm_pageout_page_stats: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		object = m->object;

		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_page_requeue(m);
			}
		}
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
	int error, pass;

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
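	/*
	 * A worked example, assuming a machine with 262144 4 KB pages
	 * (1 GB) and the common MAXBSIZE of 64 KB: v_free_min starts at
	 * 4 + 261120 / 200 = 1309, v_pageout_free_min is 32 + 2 = 34,
	 * v_free_reserved is 16 + 34 + 341 = 391, so the final v_free_min
	 * is 1700 pages and v_free_severe is 654 + 391 = 1045.
	 */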

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed,
			    &vm_page_queue_free_mtx, PVM, "psleep",
			    vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				mtx_unlock(&vm_page_queue_free_mtx);
				pass = 0;
				vm_page_lock_queues();
				vm_pageout_page_stats();
				vm_page_unlock_queues();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup(void)
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
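	/*
	 * Rate-limit wakeups to roughly one per second; the second test
	 * handles the ticks counter wrapping around.
	 */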
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags;

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * Scan the processes for those exceeding their rlimits or
		 * that are swapped out, and deactivate their pages.
		 */
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif			/* !defined(NO_SWAPPING) */