vm_pageout.c revision 5464
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.29 1995/01/09 16:05:53 davidg Exp $
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

extern vm_map_t kmem_map;
int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pagescanner;		/* Event on which pagescanner sleeps */

int vm_pageout_pages_needed = 0;/* flag saying that the pageout daemon needs pages */
int vm_page_pagesfreed;

extern int npendingio;
int vm_pageout_proc_limit;
int vm_pageout_req_swapout;
int vm_daemon_needed;
extern int nswiodone;
extern int swap_pager_full;
extern int vm_swap_size;
extern int swap_pager_ready();

#define MAXREF 32767

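/*
 * Scan tunables.  A page's act_count decays by ACT_DECLINE each time the
 * scanner finds it unreferenced and advances by ACT_ADVANCE (saturating
 * at ACT_MAX) when it has been referenced; a page becomes a candidate
 * for deactivation once its act_count reaches zero.
 */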
#define MAXSCAN 512		/* maximum number of pages to scan in active queue */
#define ACT_DECLINE	1
#define ACT_ADVANCE	3
#define ACT_MAX		100
#define MAXISCAN 256
#define MINTOFREE 6
#define MINFREE 2

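/*
 * MAXLAUNDER bounds the number of dirty pages cleaned per pass over the
 * inactive queue; larger machines (more than 1800 pages) launder more.
 */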
#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

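/*
 * VM_PAGEOUT_PAGE_COUNT is the maximum cluster size for a single pageout
 * operation (see vm_pageout_clean below).
 */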
#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
int vm_pageout_req_do_stats;

int vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 * vm_pageout_clean:
 *	Clean (page out) the given page, clustering up to
 *	vm_pageout_page_count adjacent dirty pages in the same object
 *	when possible.  Returns the number of pages paged out or queued
 *	for pageout.
 */
int
vm_pageout_clean(m, sync)
	register vm_page_t m;
	int sync;
{
	/*
	 * Clean the page and remove it from the laundry.
	 *
	 * We set the busy bit to cause potential page faults on this page to
	 * block.
	 *
	 * And we set pageout-in-progress to keep the object from disappearing
	 * during pageout.  This guarantees that the page won't move from the
	 * inactive queue.  (However, any other page on the inactive queue may
	 * move!)
	 */

	register vm_object_t object;
	register vm_pager_t pager;
	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
	vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int anyok = 0;
	int i;
	vm_offset_t offset = m->offset;

	object = m->object;
	if (!object) {
		printf("pager: object missing\n");
		return 0;
	}
	if (!object->pager && (object->flags & OBJ_INTERNAL) == 0) {
		printf("pager: non-internal obj without pager\n");
	}
	/*
	 * Try to collapse the object before making a pager for it.  We must
	 * unlock the page queues first.  We try to defer the creation of a
	 * pager until all shadows are not paging.  This allows
	 * vm_object_collapse to work better and helps control swap space
	 * size. (J. Dyson 11 Nov 93)
	 */

	if (!object->pager &&
	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		return 0;

	if ((!sync && m->bmapped != 0 && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	if (!sync && object->shadow) {
		vm_object_collapse(object);
	}
	pageout_count = 1;
	ms[0] = m;

	pager = object->pager;
	if (pager) {
		for (i = 1; i < vm_pageout_page_count; i++) {
			vm_page_t mt;

			ms[i] = mt = vm_page_lookup(object, offset + i * NBPG);
			if (mt) {
				vm_page_test_dirty(mt);
				/*
				 * We can cluster ONLY if the page is dirty
				 * and is not wired, busy, held, or mapped
				 * into a buffer, and one of the following
				 * holds: 1) the page is inactive (or a
				 * seldom-used active page), or 2) we force
				 * the issue (sync == VM_PAGEOUT_FORCE).
				 */
				if ((mt->dirty & mt->valid) != 0
				    && (((mt->flags & (PG_BUSY | PG_INACTIVE)) == PG_INACTIVE)
					|| sync == VM_PAGEOUT_FORCE)
				    && (mt->wire_count == 0)
				    && (mt->busy == 0)
				    && (mt->hold_count == 0)
				    && (mt->bmapped == 0))
					pageout_count++;
				else
					break;
			} else
				break;
		}
		/*
		 * we allow reads during pageouts...
		 */
		for (i = 0; i < pageout_count; i++) {
			ms[i]->flags |= PG_BUSY;
			pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
		}
		object->paging_in_progress += pageout_count;
	} else {

		m->flags |= PG_BUSY;

		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);

		object->paging_in_progress++;

		pager = vm_pager_allocate(PG_DFLT, (caddr_t) 0,
		    object->size, VM_PROT_ALL, 0);
		if (pager != NULL) {
			vm_object_setpager(object, pager, 0, FALSE);
		}
	}

	/*
	 * If there is no pager for the page, use the default pager.  If
	 * there's no place to put the page at the moment, leave it in the
	 * laundry and hope that there will be paging space later.
	 */

	if ((pager && pager->pg_type == PG_SWAP) ||
	    (cnt.v_free_count + cnt.v_cache_count) >= cnt.v_pageout_free_min) {
		if (pageout_count == 1) {
			pageout_status[0] = pager ?
			    vm_pager_put(pager, m,
			    ((sync || (object == kernel_object)) ? TRUE : FALSE)) :
			    VM_PAGER_FAIL;
		} else {
			if (!pager) {
				for (i = 0; i < pageout_count; i++)
					pageout_status[i] = VM_PAGER_FAIL;
			} else {
				vm_pager_put_pages(pager, ms, pageout_count,
				    ((sync || (object == kernel_object)) ? TRUE : FALSE),
				    pageout_status);
			}
		}
	} else {
		for (i = 0; i < pageout_count; i++)
			pageout_status[i] = VM_PAGER_FAIL;
	}

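	/*
	 * Examine the pager's status for each page in the cluster and
	 * reactivate, clean up, or leave the page as appropriate.
	 */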
	for (i = 0; i < pageout_count; i++) {
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			++anyok;
			break;
		case VM_PAGER_PEND:
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
			ms[i]->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, reactivate it
			 * so it doesn't clog the inactive list.  (We will
			 * try paging it out again later.)
			 */
			if (ms[i]->flags & PG_INACTIVE)
				vm_page_activate(ms[i]);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			PAGE_WAKEUP(ms[i]);
			if (--object->paging_in_progress == 0)
				wakeup((caddr_t) object);
			if ((ms[i]->flags & PG_REFERENCED) ||
			    pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
				ms[i]->flags &= ~PG_REFERENCED;
				if (ms[i]->flags & PG_INACTIVE)
					vm_page_activate(ms[i]);
			}
		}
	}
	return anyok;
}

/*
 *	vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements; or, if map_remove_only is set, just remove
 *	the mappings for the pages in the object and its shadows.
 *
 *	The object and map must be locked.
 */
int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	int count;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

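	/*
	 * Walk down the shadow chain first; if a shadow is shared
	 * (ref_count != 1), only remove its mappings rather than
	 * deactivating its pages.
	 */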
	if (object->shadow) {
		if (object->shadow->ref_count == 1)
			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
		else
			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, 1);
	}
	if (object->paging_in_progress || !vm_object_lock_try(object))
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		cnt.v_pdpages++;
		vm_page_lock_queues();
		if (p->wire_count != 0 ||
		    p->hold_count != 0 ||
		    p->bmapped != 0 ||
		    p->busy != 0 ||
		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			p = next;
			continue;
		}
		/*
		 * If a page is active, not wired, and is in the process's
		 * pmap, then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
			    (p->flags & PG_REFERENCED) == 0) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we
				 * deactivate
				 */
				if (!p->act_count) {
					if (!map_remove_only)
						vm_page_deactivate(p);
					pmap_page_protect(VM_PAGE_TO_PHYS(p),
					    VM_PROT_NONE);
					/*
					 * Otherwise the page will be
					 * deactivated on a later go-around;
					 * move it to the end of the queue
					 * so that the other pages in memory
					 * age as well.
					 */
				} else {
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					TAILQ_REMOVE(&object->memq, p, listq);
					TAILQ_INSERT_TAIL(&object->memq, p, listq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						vm_page_unlock_queues();
						vm_object_unlock(object);
						return dcount;
					}
				}
			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				p->flags &= ~PG_REFERENCED;
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				TAILQ_REMOVE(&object->memq, p, listq);
				TAILQ_INSERT_TAIL(&object->memq, p, listq);
			}
		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p),
			    VM_PROT_NONE);
		}
		vm_page_unlock_queues();
		p = next;
	}
	vm_object_unlock(object);
	return dcount;
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, though
 * that is really hard to do.
 */

void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	int (*freeer) (vm_map_t, vm_object_t, int, int);
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (*count <= 0)
		return;
	vm_map_reference(map);
	if (!lock_try_read(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}
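	/*
	 * With no entry specified, walk every entry in the map; recurse
	 * into sub maps and share maps; otherwise hand the entry's object
	 * to the freeer function.
	 */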
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != 0) {
		*count -= (*freeer) (map, obj, *count, 0);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

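/*
 * Wake up the vm_daemon, rate-limited to roughly ten wakeups per second.
 */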
void
vm_req_vmdaemon()
{
	extern int ticks;
	static int lastrun = 0;

	if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
		wakeup((caddr_t) &vm_daemon_needed);
		lastrun = ticks;
	}
}

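/*
 * Scan up to maxiscan pages at the head of the inactive queue, noting
 * hardware reference bits in the page flags and reactivating any page
 * that has been referenced.
 */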
void
vm_pageout_inactive_stats(int maxiscan)
{
	vm_page_t m;

	if (maxiscan > cnt.v_inactive_count)
		maxiscan = cnt.v_inactive_count;
	m = vm_page_queue_inactive.tqh_first;
	while (m && (maxiscan-- > 0)) {
		vm_page_t next;

		next = m->pageq.tqe_next;

		if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			m->flags |= PG_REFERENCED;
		}
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		}
		if (m->flags & PG_REFERENCED) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			/*
			 * heuristic alert -- if a page is being re-activated,
			 * it probably will be used one more time...
			 */
			++m->act_count;
			++m->act_count;
		}
		m = next;
	}
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon:
 *	free clean inactive pages, launder dirty ones, replenish the
 *	inactive queue from the active queue, and as a last resort kill
 *	the biggest process when memory and swap are exhausted.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder;
	int pages_freed;
	int desired_free;
	vm_page_t next;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int minscan;
	int mintofree;

#ifdef LFS
	lfs_reclaim_buffers();
#endif

	if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_inactive_target + cnt.v_free_min)) {
		vm_req_vmdaemon();
	}
	/*
	 * now swap processes out if we are in low memory conditions
	 */
	if ((cnt.v_free_count <= cnt.v_free_min) &&
	    !swap_pager_full && vm_swap_size && vm_pageout_req_swapout == 0) {
		vm_pageout_req_swapout = 1;
		vm_req_vmdaemon();
	}
	pages_freed = 0;
	desired_free = cnt.v_free_target;

	/*
	 * Start scanning the inactive queue for pages we can free. We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	vm_pageout_inactive_stats(MAXISCAN);
	maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
	    MAXLAUNDER : cnt.v_inactive_target;

rescan1:
	maxscan = cnt.v_inactive_count;
	mintofree = MINTOFREE;
	m = vm_page_queue_inactive.tqh_first;
	while (m &&
	    (maxscan-- > 0) &&
	    (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
		(--mintofree > 0))) {
		vm_page_t next;

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?\n");
			break;
		}
#endif

		/*
		 * don't mess with busy pages
		 */
		if (m->hold_count || m->busy || (m->flags & PG_BUSY) ||
		    m->bmapped != 0) {
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			m = next;
			continue;
		}
		if (((m->flags & PG_REFERENCED) == 0) &&
		    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			m->flags |= PG_REFERENCED;
		}
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		}
		if ((m->flags & PG_REFERENCED) != 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			++m->act_count;
			++m->act_count;
			m = next;
			continue;
		}
		vm_page_test_dirty(m);

		if ((m->dirty & m->valid) == 0) {
			if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
			    (cnt.v_cache_count < cnt.v_cache_min))
				vm_page_cache(m);
		} else if (maxlaunder > 0) {
			int written;

			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);

			object = m->object;
			if (!vm_object_lock_try(object)) {
				m = next;
				continue;
			}
			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);
			vm_object_unlock(object);

			if (!next) {
				break;
			}
			maxlaunder -= written;
			/*
			 * if the next page has been re-activated, start
			 * scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0) {
				goto rescan1;
			}
		} else {
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		}
		m = next;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory,
	 * be sure that we will move a minimal number of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			if ((cnt.v_free_count + cnt.v_cache_count) < desired_free) {
				page_shortage =
				    desired_free - (cnt.v_free_count + cnt.v_cache_count);
			}
		}
	}
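	/*
	 * Scan the active queue: referenced pages are rotated to the tail
	 * and their act_count advanced; unreferenced pages decay and are
	 * deactivated (or cached, if clean) once act_count reaches zero,
	 * while a page shortage remains.
	 */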
	maxscan = cnt.v_active_count;
	minscan = cnt.v_active_count;
	if (minscan > MAXSCAN)
		minscan = MAXSCAN;
	m = vm_page_queue_active.tqh_first;
	while (m && ((maxscan > 0 && (page_shortage > 0)) || minscan > 0)) {
		if (maxscan)
			--maxscan;
		if (minscan)
			--minscan;

		cnt.v_pdpages++;
		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0) ||
		    (m->bmapped != 0)) {
			m = next;
			continue;
		}
		if (m->object->ref_count && ((m->flags & PG_REFERENCED) ||
			pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
			int s;

			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			m->flags &= ~PG_REFERENCED;
			if (m->act_count < ACT_MAX) {
				m->act_count += ACT_ADVANCE;
			}
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			s = splhigh();
			TAILQ_REMOVE(&m->object->memq, m, listq);
			TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count && (page_shortage > 0)) {
				if (m->object->ref_count == 0) {
					vm_page_test_dirty(m);

					m->flags &= ~PG_REFERENCED;
					pmap_clear_reference(VM_PAGE_TO_PHYS(m));

					--page_shortage;
					if ((m->dirty & m->valid) == 0) {
						m->act_count = 0;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
					}
				} else {

					m->flags &= ~PG_REFERENCED;
					pmap_clear_reference(VM_PAGE_TO_PHYS(m));

					vm_page_deactivate(m);
					--page_shortage;
				}
			} else {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			}
		}
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages; this allows
	 * interrupt code to be guaranteed space.
	 */
	while (cnt.v_free_count < MINFREE) {
		m = vm_page_queue_cache.tqh_first;
		if (!m)
			break;
		vm_page_free(m);
	}

	/*
	 * Make sure that we have swap space -- if we are low on both
	 * memory and swap, kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
			psignal(bigproc, SIGKILL);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup((caddr_t) &cnt.v_free_count);
		}
	}
	vm_page_pagesfreed += pages_freed;
	return force_wakeup;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	cnt.v_pageout_free_min = 5 + cnt.v_page_count / 1024;
	cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
	cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_inactive_target = cnt.v_free_count / 4;
	if (cnt.v_inactive_target > 512)
		cnt.v_inactive_target = 512;
	cnt.v_free_min += cnt.v_free_reserved;
	if (cnt.v_page_count > 1024) {
		cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
		cnt.v_cache_min = (cnt.v_free_count - 1024) / 20;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
	}

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	(void) swap_pager_alloc(0, 0, 0, 0);
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
		cnt.v_pdwakeups++;
		vm_pager_sync();
		vm_pageout_scan();
		vm_pager_sync();
		wakeup((caddr_t) &cnt.v_free_count);
		wakeup((caddr_t) kmem_map);
	}
}

void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep((caddr_t) &vm_daemon_needed, PUSER, "psleep", 0);
		swapout_threads();
		/*
		 * Scan the processes for exceeding their rlimits, or if a
		 * process is swapped out -- deactivate pages.
		 */

		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			int overage;
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or the process is
			 * exiting, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process's RSS limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing to force a
			 * swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
			if (limit >= 0 && size >= limit) {
				overage = (size - limit) / NBPG;
				if (limit == 0)
					overage += 20;
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
			}
		}

		/*
		 * we remove cached objects that have no RSS...
		 */
restart:
		vm_object_cache_lock();
		object = vm_object_cached_list.tqh_first;
		while (object) {
			vm_object_cache_unlock();
			/*
			 * if there are no resident pages -- get rid of the
			 * object
			 */
			if (object->resident_page_count == 0) {
				if (object != vm_object_lookup(object->pager))
					panic("vm_object_cache_trim: I'm sooo confused.");
				pager_cache(object, FALSE);
				goto restart;
			}
			object = object->cached_list.tqe_next;
			vm_object_cache_lock();
		}
		vm_object_cache_unlock();
	}
}
959