/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *			GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- a pageq mutex is required when adding or removing a page from a
 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
 *	  busy state of a page.
 *
 *	- a hash chain mutex is required when associating or disassociating
 *	  a page from the VM PAGE CACHE hash table (vm_page_buckets),
 *	  regardless of other mutexes or the busy state of a page.
 *
 *	- either a hash chain mutex OR a busied page is required in order
 *	  to modify the page flags.  A hash chain mutex must be obtained in
 *	  order to busy a page.  A page's flags cannot be modified by a
 *	  hash chain mutex if the page is marked busy.
 *
 *	- The object memq mutex is held when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).  This
 *	  is different from the object's main mutex.
 *
 *	Generally speaking, you have to be aware of side effects when running
 *	vm_page ops.  A vm_page_lookup() will return with the hash chain
 *	locked, whether it was able to lookup the page or not.  vm_page_free(),
 *	vm_page_cache(), vm_page_activate(), and a number of other routines
 *	will release the hash chain mutex for you.  Intermediate manipulation
 *	routines such as vm_page_flag_set() expect the hash chain to be held
 *	on entry and the hash chain will remain held on return.
 *
 *	pageq scanning can only occur with the pageq in question locked.
 *	We have a known bottleneck with the active queue, but the cache
 *	and free queues are actually arrays already.
 */
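
/*
 * For illustration only, a rough sketch of how a caller might busy a page
 * and account for I/O on it under the rules above, using routines defined
 * later in this file (this is a hedged example, not a prescription):
 *
 *	vm_page_lock_queues();
 *	vm_page_busy(m);
 *	vm_page_io_start(m);
 *	vm_page_unlock_queues();
 *	... issue and wait for the I/O ...
 *	vm_page_lock_queues();
 *	vm_page_io_finish(m);
 *	vm_page_wakeup(m);
 *	vm_page_unlock_queues();
 */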

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 123711 2003-12-22 02:04:08Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct mtx vm_page_queue_mtx;
struct mtx vm_page_queue_free_mtx;

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	int biggestone;

	vm_paddr_t total;
	vm_size_t bootpages;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the locks.
	 */
	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF);
	mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
	   MTX_SPIN);

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_pageq_init();

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.
	 */
	bootpages = UMA_BOOT_PAGES * UMA_SLAB_SIZE;
	new_end = end - bootpages;
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, end - new_end);
	uma_startup((caddr_t)mapped);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (end - new_end)) / PAGE_SIZE;
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in descending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_pageq_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (vaddr);
}

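/*
 *	vm_page_flag_set / vm_page_flag_clear:
 *
 *	Set or clear the given bits in a page's flags word.  The page
 *	queues lock must be held.
 */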
void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->flags |= bits;
}

void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->flags &= ~bits;
}

void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
	    ("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 *      vm_page_flash:
 *
 *      wake up anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 *      vm_page_wakeup:
 *
 *      clear the PG_BUSY flag and wake up anyone waiting for the
 *      page.
 */
void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

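/*
 *	vm_page_io_start / vm_page_io_finish:
 *
 *	Account for the start and completion of I/O on the page by
 *	bumping or dropping its busy count.  The page queues lock must
 *	be held; vm_page_io_finish() wakes up anyone waiting for the
 *	page once the last I/O completes.
 */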
void
vm_page_io_start(vm_page_t m)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->busy--;
	if (m->busy == 0)
		vm_page_flash(m);
}

/*
 * Keep the page from being freed by the page daemon.  This has much the
 * same effect as wiring, except at much lower overhead, and should be
 * used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_free:
 *
 *	Free a page
 *
 *	The clearing of PG_ZERO is a temporary safety until the code can be
 *	reviewed to determine that PG_ZERO is being properly cleared on
 *	write faults or maps.  PG_ZERO was previously cleared in
 *	vm_page_alloc().
 */
void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
	vm_page_zero_idle_wakeup();
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue
 */
void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if PG_BUSY is set or,
 *	if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *	thread slept and the page queues lock was released.
 *	Otherwise, retains the page queues lock and returns FALSE.
 */
int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	int is_object_locked;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
		/*
		 * Remove mtx_owned() after vm_object locking is finished.
		 */
		if ((is_object_locked = m->object != NULL &&
		     mtx_owned(&m->object->mtx)))
			mtx_unlock(&m->object->mtx);
		msleep(m, &vm_page_queue_mtx, PDROP | PVM, msg, 0);
		if (is_object_locked)
			mtx_lock(&m->object->mtx);
		return (TRUE);
	}
	return (FALSE);
}

/*
 *	vm_page_dirty:
 *
 *	make page all dirty
 */
void
vm_page_dirty(vm_page_t m)
{
	KASSERT(m->queue - m->pc != PQ_CACHE,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(m->queue - m->pc != PQ_FREE,
	    ("vm_page_dirty: page is free!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_page containing the given pindex.  If, however, that
 *	pindex is not found in the vm_object, returns a vm_page that is
 *	adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
	struct vm_page dummy;
	vm_page_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (pindex < root->pindex) {
			if ((y = root->left) == NULL)
				break;
			if (pindex < y->pindex) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (pindex > root->pindex) {
			if ((y = root->right) == NULL)
				break;
			if (pindex > y->pindex) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/* Assemble the new root. */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}

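/*
 * For illustration, the typical caller pattern (see vm_page_lookup() and
 * vm_page_remove() below) is to splay the object's tree about the pindex
 * of interest, store the returned node back as the root, and then test
 * whether that new root actually carries the wanted pindex:
 *
 *	root = vm_page_splay(pindex, object->root);
 *	object->root = root;
 *	if (root != NULL && root->pindex == pindex)
 *		... the page was found ...
 */
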
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked, and must be splhigh.
 *	This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t root;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	root = object->root;
	if (root == NULL) {
		m->left = NULL;
		m->right = NULL;
		TAILQ_INSERT_TAIL(&object->memq, m, listq);
	} else {
		root = vm_page_splay(pindex, root);
		if (pindex < root->pindex) {
			m->left = root->left;
			m->right = root;
			root->left = NULL;
			TAILQ_INSERT_BEFORE(root, m, listq);
		} else {
			m->right = root->right;
			m->left = root;
			root->right = NULL;
			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
		}
	}
	object->root = m;
	object->generation++;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked, and at splhigh.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->object == NULL)
		return;
#ifndef	__alpha__
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
#endif
	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}

	/*
	 * Basically destroy the page.
	 */
	vm_page_wakeup(m);

	object = m->object;

	/*
	 * Now remove from the object's list of backed pages.
	 */
	if (m != object->root)
		vm_page_splay(m->pindex, object->root);
	if (m->left == NULL)
		root = m->right;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->root = root;
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 *	This routine may not block.
 *	This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	m = vm_page_splay(pindex, object->root);
	if ((object->root = m) != NULL && m->pindex != pindex)
		m = NULL;
	return (m);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: this routine will raise itself to splvm(); the caller need not.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons:  (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	splx(s);
}

/*
 *	vm_page_select_cache:
 *
 *	Find a page on the cache queue with color optimization.  Pages that
 *	are found but not applicable are deactivated.  This keeps us from
 *	using potentially busy cached pages.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
vm_page_t
vm_page_select_cache(int color)
{
	vm_page_t m;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	while (TRUE) {
		m = vm_pageq_find(PQ_CACHE, color, FALSE);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			       m->hold_count || m->wire_count ||
			  (!VM_OBJECT_TRYLOCK(m->object) &&
			   !VM_OBJECT_LOCKED(m->object)))) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	This routine may not block.
 *
 *	Additional special handling is required when called from an
 *	interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *	the page cache in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	vm_object_t m_object;
	vm_page_t m = NULL;
	int color, flags, page_req, s;

	page_req = req & VM_ALLOC_CLASS_MASK;

	if ((req & VM_ALLOC_NOOBJ) == 0) {
		KASSERT(object != NULL,
		    ("vm_page_alloc: NULL object."));
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		KASSERT(!vm_page_lookup(object, pindex),
		    ("vm_page_alloc: page already allocated"));
		color = (pindex + object->pg_color) & PQ_L2_MASK;
	} else
		color = pindex & PQ_L2_MASK;

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();
loop:
	mtx_lock_spin(&vm_page_queue_free_mtx);
	if (cnt.v_free_count > cnt.v_free_reserved ||
	    (page_req == VM_ALLOC_SYSTEM &&
	     cnt.v_cache_count == 0 &&
	     cnt.v_free_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
	} else if (page_req != VM_ALLOC_INTERRUPT) {
		mtx_unlock_spin(&vm_page_queue_free_mtx);
		/*
		 * Allocatable from cache (non-interrupt only).  On success,
		 * we must free the page and try again, thus ensuring that
		 * cnt.v_*_free_min counters are replenished.
		 */
		vm_page_lock_queues();
		if ((m = vm_page_select_cache(color)) == NULL) {
			vm_page_unlock_queues();
			splx(s);
#if defined(DIAGNOSTIC)
			if (cnt.v_cache_count > 0)
				printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
			atomic_add_int(&vm_pageout_deficit, 1);
			pagedaemon_wakeup();
			return (NULL);
		}
		KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
		m_object = m->object;
		VM_OBJECT_LOCK_ASSERT(m_object, MA_OWNED);
		vm_page_busy(m);
		pmap_remove_all(m);
		vm_page_free(m);
		vm_page_unlock_queues();
		if (m_object != object)
			VM_OBJECT_UNLOCK(m_object);
		goto loop;
	} else {
		/*
		 * Not allocatable from cache from interrupt, give up.
		 */
		mtx_unlock_spin(&vm_page_queue_free_mtx);
		splx(s);
		atomic_add_int(&vm_pageout_deficit, 1);
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 *  At this point we had better have found a good page.
	 */

	KASSERT(
	    m != NULL,
	    ("vm_page_alloc(): missing page on free queue\n")
	);

	/*
	 * Remove from free queue
	 */

	vm_pageq_remove_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */
	flags = PG_BUSY;
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		if (req & VM_ALLOC_ZERO)
			flags = PG_ZERO | PG_BUSY;
	}
	m->flags = flags;
	if (req & VM_ALLOC_WIRED) {
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	} else
		m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
	mtx_unlock_spin(&vm_page_queue_free_mtx);

	/*
	 * vm_page_insert() is safe prior to the splx().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */
	if ((req & VM_ALLOC_NOOBJ) == 0)
		vm_page_insert(m, object, pindex);
	else
		m->pindex = pindex;

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	splx(s);
	return (m);
}

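/*
 * For illustration, a typical caller pattern might look roughly like the
 * sketch below (assuming the object lock is already held and "retry" is a
 * caller-chosen label):
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL) {
 *		VM_OBJECT_UNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_LOCK(object);
 *		goto retry;
 *	}
 */
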
/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{
	int s;

	s = splvm();
	vm_page_lock_queues();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		msleep(&vm_pageout_pages_needed, &vm_page_queue_mtx,
		    PDROP | PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PVM,
		    "vmwait", 0);
	}
	splx(s);
}

/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	int s;

	s = splvm();
	vm_page_lock_queues();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PUSER,
	    "pfault", 0);
	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int s;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_pageq_remove(m);
		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vm_pageq_enqueue(PQ_ACTIVE, m);
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
	splx(s);
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wake up processes that are waiting on memory if we hit a
	 * high water mark, and wake up the scheduler process if we have
	 * lots of memory; this process will swap in processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&cnt.v_free_count);
	}
}

/*
 *	vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */

void
vm_page_free_toq(vm_page_t m)
{
	int s;
	struct vpgqueues *pq;
	vm_object_t object = m->object;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	s = splvm();
	cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		printf(
		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_pageq_remove_nowakeup(m);
	vm_page_remove(m);

	/*
	 * If fictitious remove object association and
	 * return, otherwise delay object association removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		splx(s);
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
				m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page\n");
	}

	/*
	 * If we've exhausted the object's resident pages we want to free
	 * it up.
	 */
	if (object &&
	    (object->type == OBJT_VNODE) &&
	    ((object->flags & OBJ_DEAD) == 0)
	) {
		struct vnode *vp = (struct vnode *)object->handle;

		if (vp) {
			VI_LOCK(vp);
			if (VSHOULDFREE(vp))
				vfree(vp);
			VI_UNLOCK(vp);
		}
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
		m->flags &= ~PG_UNMANAGED;
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else
		m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	mtx_lock_spin(&vm_page_queue_free_mtx);
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages on the end ( where we look for zero'd pages
	 * first ) and non-zero'd pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}
	mtx_unlock_spin(&vm_page_queue_free_mtx);
	vm_page_free_wakeup();
	splx(s);
}

/*
 *	vm_page_unmanage:
 *
 * 	Prevent PV management from being done on the page.  The page is
 *	removed from the paging queues as if it were wired, and as a
 *	consequence of no longer being managed the pageout daemon will not
 *	touch it (since there is no way to locate the pte mappings for the
 *	page).  madvise() calls that mess with the pmap will also no longer
 *	operate on the page.
 *
 *	Beyond that the page is still reasonably 'normal'.  Freeing the page
 *	will clear the flag.
 *
 *	This routine is used by OBJT_PHYS objects - objects using unswappable
 *	physical memory as backing store rather than swap-backed memory and
 *	will eventually be extended to support 4MB unmanaged physical
 *	mappings.
 */
void
vm_page_unmanage(vm_page_t m)
{
	int s;

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_pageq_remove(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
	splx(s);
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	int s;

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->wire_count == 0) {
		if ((m->flags & PG_UNMANAGED) == 0)
			vm_pageq_remove(m);
		atomic_add_int(&cnt.v_wire_count, 1);
	}
	m->wire_count++;
	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
	splx(s);
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	BUT, if we are in a low-memory situation we have no choice but to
 *	put clean pages on the cache queue.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	dirty pages in the cache are not allowed.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	int s;

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			atomic_subtract_int(&cnt.v_wire_count, 1);
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate)
				vm_pageq_enqueue(PQ_ACTIVE, m);
			else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				vm_pageq_enqueue(PQ_INACTIVE, m);
			}
		}
	} else {
		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
	}
	splx(s);
}


/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	int s;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_pageq_remove(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		cnt.v_inactive_count++;
	}
	splx(s);
}

void
vm_page_deactivate(vm_page_t m)
{
	_vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return (0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return (0);
	vm_page_cache(m);
	return (1);
}

/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->object != NULL)
		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return (0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return (0);
	vm_page_busy(m);
	pmap_remove_all(m);
	vm_page_free(m);
	return (1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	int s;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
	    m->hold_count || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */
	pmap_remove_all(m);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
			(long)m->pindex);
	}
	s = splvm();
	vm_pageq_remove_nowakeup(m);
	vm_pageq_enqueue(PQ_CACHE + m->pc, m);
	vm_page_free_wakeup();
	splx(s);
}

/*
 * vm_page_dontneed
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	dnw = ++dnweight;

	/*
	 * occasionally leave the page alone
	 */
	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		return;
	}

	if (m->dirty == 0)
		vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
}

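/*
 * Where the 3/32 and 28/32 weights above come from (a rough derivation for
 * a clean page that is not already inactive or cached): dnweight is a
 * simple counter, so (dnw & 0x01F0) == 0 holds for 16 of every 512 values,
 * i.e. 1 time in 32, which is the "leave it alone" case.  (dnw & 0x0070)
 * == 0 holds 1 time in 8, i.e. 4 times in 32; subtracting the 1/32 already
 * handled leaves 3/32 deactivated at the tail, and the remaining 28/32 are
 * placed at the head of the inactive queue (the "cache it" case).
 */
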
/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting as long as the page continues
 * to be in the object.  If the page doesn't exist, allocate it.
 *
 * This routine may block.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int s, generation;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		vm_page_lock_queues();
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
					(m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				VM_OBJECT_UNLOCK(object);
				msleep(m, &vm_page_queue_mtx, PDROP | PVM, "pgrbwt", 0);
				VM_OBJECT_LOCK(object);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return NULL;
				}
				vm_page_lock_queues();
			}
			vm_page_unlock_queues();
			splx(s);
			goto retrylookup;
		} else {
			if (allocflags & VM_ALLOC_WIRED)
				vm_page_wire(m);
			vm_page_busy(m);
			vm_page_unlock_queues();
			return m;
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_OBJECT_UNLOCK(object);
		VM_WAIT;
		VM_OBJECT_LOCK(object);
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return NULL;
		goto retrylookup;
	}

	return m;
}

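/*
 * For illustration, a caller that wants a busied, resident page and is
 * willing to sleep for it might, with the object locked, do roughly:
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *
 * With VM_ALLOC_RETRY set the routine keeps retrying until it can return
 * a page, so the caller does not need to handle a NULL return in that case.
 */
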
/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return (0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}

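/*
 * A worked example, assuming DEV_BSIZE is 512 (DEV_BSHIFT == 9):
 * vm_page_bits(0, 1024) covers device blocks 0 and 1 of the page, so
 * first_bit = 0, last_bit = 1023 >> 9 = 1, and the result is
 * (2 << 1) - (1 << 0) = 4 - 1 = 0x3, i.e. the low two bits set.
 * vm_page_bits(512, 512) similarly yields (2 << 1) - (1 << 1) = 0x2.
 */
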
/*
 *	vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zero'd.
 *
 *	This routine may not block.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;
	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}

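/*
 *	vm_page_clear_dirty:
 *
 *	Clear the dirty bits for the DEV_BSIZE chunks touched by the given
 *	byte range.  The page queues lock must be held.
 */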
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 *	vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 *
 *	May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	bits = vm_page_bits(base, size);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
	 * valid bit may be set ) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(m,
				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 *
 *	May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}

int so_zerocp_fullpage = 0;

void
vm_page_cowfault(vm_page_t m)
{
	vm_page_t mnew;
	vm_object_t object;
	vm_pindex_t pindex;

	object = m->object;
	pindex = m->pindex;
	vm_page_busy(m);

 retry_alloc:
	vm_page_remove(m);
	/*
	 * An interrupt allocation is requested because the page
	 * queues lock is held.
	 */
	mnew = vm_page_alloc(object, pindex, VM_ALLOC_INTERRUPT);
	if (mnew == NULL) {
		vm_page_insert(m, object, pindex);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		VM_WAIT;
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		goto retry_alloc;
	}

	if (m->cow == 0) {
		/*
		 * check to see if we raced with an xmit complete when
		 * waiting to allocate a page.  If so, put things back
		 * the way they were
		 */
		vm_page_busy(mnew);
		vm_page_free(mnew);
		vm_page_insert(m, object, pindex);
	} else { /* clear COW & copy page */
		if (!so_zerocp_fullpage)
			pmap_copy_page(m, mnew);
		mnew->valid = VM_PAGE_BITS_ALL;
		vm_page_dirty(mnew);
		vm_page_flag_clear(mnew, PG_BUSY);
	}
}

void
vm_page_cowclear(vm_page_t m)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->cow) {
		m->cow--;
		/*
		 * let vm_fault add back write permission lazily
		 */
	}
	/*
	 * sf_buf_free() will free the page, so we needn't do it here
	 */
}

void
vm_page_cowsetup(vm_page_t m)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->cow++;
	pmap_page_protect(m, VM_PROT_READ);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;
	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
		vm_page_queues[PQ_ACTIVE].lcnt,
		vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */