vm_pageout.c revision 12110
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.58 1995/10/23 05:35:48 dyson Exp $
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
struct	proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)


int vm_pages_needed;		/* Event on which pageout daemon sleeps */

int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
int vm_pageout_req_swapout;	/* XXX */
int vm_daemon_needed;
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;

#define MAXSCAN 1024		/* maximum number of pages to scan in queues */

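/*
 * MAXLAUNDER caps how many dirty-page cleanings one pass of
 * vm_pageout_scan may start; larger systems get a larger budget.
 */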
#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

typedef int freeer_fcn_t __P((vm_map_t, vm_object_t, int, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_map_entry_t,
						 int *, freeer_fcn_t *));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * We also set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
int
vm_pageout_clean(m, sync)
	vm_page_t m;
	int sync;
{
	register vm_object_t object;
	vm_page_t mc[2*VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_offset_t offset = m->offset;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((sync != VM_PAGEOUT_FORCE) &&
	    (object->type != OBJT_SWAP) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's held or busy.
	 */
	if ((!sync && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (!sync && object->backing_object) {
		vm_object_collapse(object);
	}
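	/*
	 * The page to be cleaned sits in the middle of mc[]:
	 *
	 *   mc: [ .. backward cluster .. | m | .. forward cluster .. ]
	 *       ^page_base (moves down)    ^VM_PAGEOUT_PAGE_COUNT
	 *
	 * Backward-clusterable pages fill downward from the center and
	 * forward ones fill upward, so the final run
	 * mc[page_base .. page_base + pageout_count - 1] is contiguous
	 * and in ascending offset order.
	 */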
	mc[VM_PAGEOUT_PAGE_COUNT] = m;
	pageout_count = 1;
	page_base = VM_PAGEOUT_PAGE_COUNT;
	forward_okay = TRUE;
	if (offset != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if the page is NOT clean, wired, busy,
	 * held, or mapped into a buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used active page.
	 * -or-
	 * 2) we force the issue.
	 */
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((offset + i * PAGE_SIZE) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, offset + i * PAGE_SIZE);
			if (p) {
				if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->flags & PG_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[VM_PAGEOUT_PAGE_COUNT + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((offset - i * PAGE_SIZE) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, offset - i * PAGE_SIZE);
			if (p) {
				if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->flags & PG_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[VM_PAGEOUT_PAGE_COUNT - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * We allow reads during pageouts: busy the pages and write-protect
	 * them so that any attempted write faults instead.
	 */
	for (i = page_base; i < (page_base + pageout_count); i++) {
		mc[i]->flags |= PG_BUSY;
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}

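/*
 * vm_pageout_flush:
 *
 * Hand a run of busied pages to the pager.  The caller has already set
 * PG_BUSY and write-protected each page; here we bump the object's
 * paging_in_progress count, issue the pager write, and then dispose of
 * each page according to its individual status.  Returns the number of
 * pages successfully written or still in flight.
 */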
int
vm_pageout_flush(mc, count, sync)
	vm_page_t *mc;
	int count;
	int sync;
{
	register vm_object_t object;
	int pageout_status[count];
	int anyok = 0;
	int i;

	object = mc[0]->object;
	object->paging_in_progress += count;

	vm_pager_put_pages(object, mc, count,
	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			if (mt->flags & PG_INACTIVE)
				vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			if ((mt->flags & (PG_REFERENCED|PG_WANTED)) ||
			    pmap_is_referenced(VM_PAGE_TO_PHYS(mt))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(mt));
				mt->flags &= ~PG_REFERENCED;
				if (mt->flags & PG_INACTIVE)
					vm_page_activate(mt);
			}
			PAGE_WAKEUP(mt);
		}
	}
	return anyok;
}

/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	int count;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

	if (object->type == OBJT_DEVICE)
		return 0;

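	/*
	 * Walk down the shadow chain first.  If we hold the only
	 * reference to the backing object, pages deactivated there count
	 * toward our target; otherwise just remove the mappings
	 * (map_remove_only) and leave the aging to its other users.
	 */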
	if (object->backing_object) {
		if (object->backing_object->ref_count == 1)
			dcount += vm_pageout_object_deactivate_pages(map,
			    object->backing_object, count / 2 + 1, map_remove_only);
		else
			vm_pageout_object_deactivate_pages(map,
			    object->backing_object, count, 1);
	}
	if (object->paging_in_progress)
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		cnt.v_pdpages++;
		if (p->wire_count != 0 ||
		    p->hold_count != 0 ||
		    p->busy != 0 ||
		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			p = next;
			continue;
		}
		/*
		 * if a page is active, not wired and is in the process's
		 * pmap, then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
			    (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we
				 * deactivate
				 */
				if (!p->act_count) {
					if (!map_remove_only)
						vm_page_deactivate(p);
					vm_page_protect(p, VM_PROT_NONE);
				} else {
					/*
					 * The page still has some age left;
					 * requeue it at the tail of the
					 * active queue so the other pages in
					 * memory age before we revisit it.
					 */
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						return dcount;
					}
				}
			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				p->flags &= ~PG_REFERENCED;
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
			}
		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
			vm_page_protect(p, VM_PROT_NONE);
		}
		p = next;
	}
	return dcount;
}


/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */

static void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	freeer_fcn_t *freeer;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (*count <= 0)
		return;
	vm_map_reference(map);
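	/*
	 * Take the map lock without blocking; if someone else holds it,
	 * skip this map rather than stall the daemon.
	 */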
	if (!lock_try_read(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != 0) {
		*count -= (*freeer) (map, obj, *count, TRUE);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

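/*
 * Ask the vm_daemon to run, rate-limiting the wakeups to roughly ten
 * per second (the ticks < lastrun test handles tick wraparound).
 */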
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder, pcount;
	int pages_freed;
	vm_page_t next;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int vnodes_skipped = 0;

	pages_freed = 0;

	/*
	 * Start scanning the inactive queue for pages we can free. We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;

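	/*
	 * maxlaunder limits how many dirty-page cleanings we may start in
	 * this pass; if a clean reactivates the next page in the queue,
	 * we restart the scan from the head of the inactive queue.
	 */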
rescan1:
	maxscan = cnt.v_inactive_count;
	m = vm_page_queue_inactive.tqh_first;
	while ((m != NULL) && (maxscan-- > 0) &&
	    ((cnt.v_cache_count + cnt.v_free_count) < (cnt.v_cache_min + cnt.v_free_target))) {
		vm_page_t next;

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?\n");
			break;
		}
#endif

		/*
		 * don't mess with busy or held pages
		 */
		if (m->hold_count || m->busy || (m->flags & PG_BUSY)) {
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			m = next;
			continue;
		}
		if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			m->flags |= PG_REFERENCED;
		}
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		}
		if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			if (m->act_count < ACT_MAX)
				m->act_count += ACT_ADVANCE;
			m = next;
			continue;
		}

		vm_page_test_dirty(m);
		if (m->dirty == 0) {
			if (m->bmapped == 0) {
				if (m->valid == 0) {
					pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
					vm_page_free(m);
					cnt.v_dfree++;
				} else {
					vm_page_cache(m);
				}
				++pages_freed;
			} else {
				m = next;
				continue;
			}
		} else if (maxlaunder > 0) {
			int written;
			struct vnode *vp = NULL;

			object = m->object;
			if (object->flags & OBJ_DEAD) {
				m = next;
				continue;
			}

			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) || vget(vp, 1)) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						++vnodes_skipped;
					m = next;
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);

			if (vp)
				vput(vp);

			if (!next) {
				break;
			}
			maxlaunder -= written;
			/*
			 * if the next page has been re-activated, start
			 * scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0) {
				goto rescan1;
			}
		}
		m = next;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}
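	/*
	 * Scan the active queue: requeue referenced pages at the tail
	 * with a boosted act_count, and age the rest toward deactivation.
	 */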
	maxscan = MAXSCAN;
	pcount = cnt.v_active_count;
	m = vm_page_queue_active.tqh_first;
	while ((m != NULL) && (maxscan > 0) && (pcount-- > 0) && (page_shortage > 0)) {

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy or held.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			m = next;
			continue;
		}
		if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
			pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			if (m->act_count < ACT_MAX) {
				m->act_count += ACT_ADVANCE;
			}
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		} else {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count && (page_shortage > 0)) {
				if (m->object->ref_count == 0) {
					--page_shortage;
					vm_page_test_dirty(m);
					if ((m->bmapped == 0) && (m->dirty == 0)) {
						m->act_count = 0;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
					}
				} else {
					vm_page_deactivate(m);
					--page_shortage;
				}
			} else if (m->act_count) {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			}
		}
		maxscan--;
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages; this allows
	 * interrupt code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		m = vm_page_queue_cache.tqh_first;
		if (!m)
			break;
		vm_page_free(m);
		cnt.v_dfree++;
	}

	/*
	 * If we didn't get enough free pages and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  Also kick off
	 * a swapout if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_target) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
		/*
		 * now swap processes out if we are in low memory conditions
		 */
		if (!swap_pager_full && vm_swap_size &&
			vm_pageout_req_swapout == 0) {
			vm_pageout_req_swapout = 1;
			vm_req_vmdaemon();
		}
	}

	if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_inactive_target + cnt.v_free_min)) {
		vm_req_vmdaemon();
	}

	/*
	 * Make sure that we have swap space: if we are low on both memory
	 * and swap, then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
			psignal(bigproc, SIGKILL);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;

	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024 +
				cnt.v_interrupt_free_min;
	cnt.v_free_reserved = cnt.v_pageout_free_min + 6;
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_free_min += cnt.v_free_reserved;

	if (cnt.v_page_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
		cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
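		/*
		 * Test the free-page targets and go to sleep at splhigh
		 * so that a wakeup cannot slip in between the check and
		 * the tsleep.
		 */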
		int s = splhigh();

		if (!vm_pages_needed ||
			((cnt.v_free_count >= cnt.v_free_reserved) &&
			 (cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", 0);
		}
		vm_pages_needed = 0;
		splx(s);
		cnt.v_pdwakeups++;
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
		wakeup(kmem_map);
	}
}

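/*
 * vm_daemon: handles swapout requests from the pageout daemon,
 * enforces per-process resident-set limits by deactivating pages of
 * processes over their RSS rlimit, and reaps cached objects that no
 * longer have resident pages.
 */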
static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs();
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes: deactivate pages of any process that
		 * exceeds its RSS rlimit or is swapped out.
		 */

		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			int overage;
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or a process that is
			 * exiting, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				overage = (size - limit) >> PAGE_SHIFT;
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
			}
		}

		/*
		 * we remove cached objects that have no RSS...
		 */
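		/*
		 * pager_cache() can free the object and perturb the cached
		 * list, so restart the walk from the head after each
		 * removal.
		 */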
restart:
		object = vm_object_cached_list.tqh_first;
		while (object) {
			/*
			 * if there are no resident pages -- get rid of the object
			 */
			if (object->resident_page_count == 0) {
				vm_object_reference(object);
				pager_cache(object, FALSE);
				goto restart;
			}
			object = object->cached_list.tqe_next;
		}
	}
}
