vm_page.c revision 27899
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37 *	$Id: vm_page.c,v 1.78 1997/05/01 14:36:01 dyson Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 *	Resident memory management module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/malloc.h>
74#include <sys/proc.h>
75#include <sys/vmmeter.h>
76
77#include <vm/vm.h>
78#include <vm/vm_param.h>
79#include <vm/vm_prot.h>
80#include <sys/lock.h>
81#include <vm/vm_kern.h>
82#include <vm/vm_object.h>
83#include <vm/vm_page.h>
84#include <vm/vm_map.h>
85#include <vm/vm_pageout.h>
86#include <vm/vm_extern.h>
87
88static void	vm_page_queue_init __P((void));
89static vm_page_t vm_page_select_free __P((vm_object_t object,
90			vm_pindex_t pindex, int prefqueue));
91
92/*
93 *	Associated with page of user-allocatable memory is a
94 *	page structure.
95 */
96
97static struct pglist *vm_page_buckets;	/* Array of buckets */
98static int vm_page_bucket_count;	/* How big is array? */
99static int vm_page_hash_mask;		/* Mask for hash function */
100
101struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
102struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
103struct pglist vm_page_queue_active = {0};
104struct pglist vm_page_queue_inactive = {0};
105struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {0};
106
107int no_queue=0;
108
109struct vpgqueues vm_page_queues[PQ_COUNT] = {0};
110int pqcnt[PQ_COUNT] = {0};
111
112static void
113vm_page_queue_init(void) {
114	int i;
115
116	vm_page_queues[PQ_NONE].pl = NULL;
117	vm_page_queues[PQ_NONE].cnt = &no_queue;
118	for(i=0;i<PQ_L2_SIZE;i++) {
119		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
120		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
121	}
122	for(i=0;i<PQ_L2_SIZE;i++) {
123		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
124		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
125	}
126	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
127	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
128
129	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
130	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
131	for(i=0;i<PQ_L2_SIZE;i++) {
132		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
133		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
134	}
135	for(i=0;i<PQ_COUNT;i++) {
136		if (vm_page_queues[i].pl) {
137			TAILQ_INIT(vm_page_queues[i].pl);
138		} else if (i != 0) {
139			panic("vm_page_queue_init: queue %d is null", i);
140		}
141		vm_page_queues[i].lcnt = &pqcnt[i];
142	}
143}
144
145vm_page_t vm_page_array = 0;
146int vm_page_array_size = 0;
147long first_page = 0;
148static long last_page;
149static vm_size_t page_mask;
150static int page_shift;
151int vm_page_zero_count = 0;
152
153/*
154 * map of contiguous valid DEV_BSIZE chunks in a page
155 * (this list is valid for page sizes upto 16*DEV_BSIZE)
156 */
157static u_short vm_page_dev_bsize_chunks[] = {
158	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
159	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
160};
161
162static inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
163static int vm_page_freechk_and_unqueue __P((vm_page_t m));
164static void vm_page_free_wakeup __P((void));
165
166/*
167 *	vm_set_page_size:
168 *
169 *	Sets the page size, perhaps based upon the memory
170 *	size.  Must be called before any use of page-size
171 *	dependent functions.
172 *
173 *	Sets page_shift and page_mask from cnt.v_page_size.
174 */
175void
176vm_set_page_size()
177{
178
179	if (cnt.v_page_size == 0)
180		cnt.v_page_size = DEFAULT_PAGE_SIZE;
181	page_mask = cnt.v_page_size - 1;
182	if ((page_mask & cnt.v_page_size) != 0)
183		panic("vm_set_page_size: page size not a power of two");
184	for (page_shift = 0;; page_shift++)
185		if ((1 << page_shift) == cnt.v_page_size)
186			break;
187}
188
189/*
190 *	vm_page_startup:
191 *
192 *	Initializes the resident memory module.
193 *
194 *	Allocates memory for the page cells, and
195 *	for the object/offset-to-page hash table headers.
196 *	Each page cell is initialized and placed on the free list.
197 */
198
199vm_offset_t
200vm_page_startup(starta, enda, vaddr)
201	register vm_offset_t starta;
202	vm_offset_t enda;
203	register vm_offset_t vaddr;
204{
205	register vm_offset_t mapped;
206	register vm_page_t m;
207	register struct pglist *bucket;
208	vm_size_t npages, page_range;
209	register vm_offset_t new_start;
210	int i;
211	vm_offset_t pa;
212	int nblocks;
213	vm_offset_t first_managed_page;
214
215	/* the biggest memory array is the second group of pages */
216	vm_offset_t start;
217	vm_offset_t biggestone, biggestsize;
218
219	vm_offset_t total;
220
221	total = 0;
222	biggestsize = 0;
223	biggestone = 0;
224	nblocks = 0;
225	vaddr = round_page(vaddr);
226
227	for (i = 0; phys_avail[i + 1]; i += 2) {
228		phys_avail[i] = round_page(phys_avail[i]);
229		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
230	}
231
232	for (i = 0; phys_avail[i + 1]; i += 2) {
233		int size = phys_avail[i + 1] - phys_avail[i];
234
235		if (size > biggestsize) {
236			biggestone = i;
237			biggestsize = size;
238		}
239		++nblocks;
240		total += size;
241	}
242
243	start = phys_avail[biggestone];
244
245	/*
246	 * Initialize the queue headers for the free queue, the active queue
247	 * and the inactive queue.
248	 */
249
250	vm_page_queue_init();
251
252	/*
253	 * Allocate (and initialize) the hash table buckets.
254	 *
255	 * The number of buckets MUST BE a power of 2, and the actual value is
256	 * the next power of 2 greater than the number of physical pages in
257	 * the system.
258	 *
259	 * Note: This computation can be tweaked if desired.
260	 */
261	vm_page_buckets = (struct pglist *) vaddr;
262	bucket = vm_page_buckets;
263	if (vm_page_bucket_count == 0) {
264		vm_page_bucket_count = 1;
265		while (vm_page_bucket_count < atop(total))
266			vm_page_bucket_count <<= 1;
267	}
268	vm_page_hash_mask = vm_page_bucket_count - 1;
269
270	/*
271	 * Validate these addresses.
272	 */
273
274	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
275	new_start = round_page(new_start);
276	mapped = vaddr;
277	vaddr = pmap_map(mapped, start, new_start,
278	    VM_PROT_READ | VM_PROT_WRITE);
279	start = new_start;
280	bzero((caddr_t) mapped, vaddr - mapped);
281	mapped = vaddr;
282
283	for (i = 0; i < vm_page_bucket_count; i++) {
284		TAILQ_INIT(bucket);
285		bucket++;
286	}
287
288	/*
289	 * Validate these zone addresses.
290	 */
291
292	new_start = start + (vaddr - mapped);
293	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
294	bzero((caddr_t) mapped, (vaddr - mapped));
295	start = round_page(new_start);
296
297	/*
298	 * Compute the number of pages of memory that will be available for
299	 * use (taking into account the overhead of a page structure per
300	 * page).
301	 */
302
303	first_page = phys_avail[0] / PAGE_SIZE;
304	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
305
306	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
307	npages = (total - (page_range * sizeof(struct vm_page)) -
308	    (start - phys_avail[biggestone])) / PAGE_SIZE;
309
310	/*
311	 * Initialize the mem entry structures now, and put them in the free
312	 * queue.
313	 */
314
315	vm_page_array = (vm_page_t) vaddr;
316	mapped = vaddr;
317
318	/*
319	 * Validate these addresses.
320	 */
321
322	new_start = round_page(start + page_range * sizeof(struct vm_page));
323	mapped = pmap_map(mapped, start, new_start,
324	    VM_PROT_READ | VM_PROT_WRITE);
325	start = new_start;
326
327	first_managed_page = start / PAGE_SIZE;
328
329	/*
330	 * Clear all of the page structures
331	 */
332	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
333	vm_page_array_size = page_range;
334
335	cnt.v_page_count = 0;
336	cnt.v_free_count = 0;
337	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
338		if (i == biggestone)
339			pa = ptoa(first_managed_page);
340		else
341			pa = phys_avail[i];
342		while (pa < phys_avail[i + 1] && npages-- > 0) {
343			++cnt.v_page_count;
344			++cnt.v_free_count;
345			m = PHYS_TO_VM_PAGE(pa);
346			m->phys_addr = pa;
347			m->flags = 0;
348			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
349			m->queue = PQ_FREE + m->pc;
350			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
351			++(*vm_page_queues[m->queue].lcnt);
352			pa += PAGE_SIZE;
353		}
354	}
355
356	return (mapped);
357}
358
359/*
360 *	vm_page_hash:
361 *
362 *	Distributes the object/offset key pair among hash buckets.
363 *
364 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
365 */
366static inline int
367vm_page_hash(object, pindex)
368	vm_object_t object;
369	vm_pindex_t pindex;
370{
371	return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
372}
373
374/*
375 *	vm_page_insert:		[ internal use only ]
376 *
377 *	Inserts the given mem entry into the object/object-page
378 *	table and object list.
379 *
380 *	The object and page must be locked, and must be splhigh.
381 */
382
383void
384vm_page_insert(m, object, pindex)
385	register vm_page_t m;
386	register vm_object_t object;
387	register vm_pindex_t pindex;
388{
389	register struct pglist *bucket;
390
391	if (m->flags & PG_TABLED)
392		panic("vm_page_insert: already inserted");
393
394	/*
395	 * Record the object/offset pair in this page
396	 */
397
398	m->object = object;
399	m->pindex = pindex;
400
401	/*
402	 * Insert it into the object_object/offset hash table
403	 */
404
405	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
406	TAILQ_INSERT_TAIL(bucket, m, hashq);
407
408	/*
409	 * Now link into the object's list of backed pages.
410	 */
411
412	TAILQ_INSERT_TAIL(&object->memq, m, listq);
413	m->flags |= PG_TABLED;
414	m->object->page_hint = m;
415
416	/*
417	 * And show that the object has one more resident page.
418	 */
419
420	object->resident_page_count++;
421}
422
423/*
424 *	vm_page_remove:		[ internal use only ]
425 *				NOTE: used by device pager as well -wfj
426 *
427 *	Removes the given mem entry from the object/offset-page
428 *	table and the object page list.
429 *
430 *	The object and page must be locked, and at splhigh.
431 */
432
433void
434vm_page_remove(m)
435	register vm_page_t m;
436{
437	register struct pglist *bucket;
438
439	if (!(m->flags & PG_TABLED))
440		return;
441
442	if (m->object->page_hint == m)
443		m->object->page_hint = NULL;
444
445	/*
446	 * Remove from the object_object/offset hash table
447	 */
448
449	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
450	TAILQ_REMOVE(bucket, m, hashq);
451
452	/*
453	 * Now remove from the object's list of backed pages.
454	 */
455
456	TAILQ_REMOVE(&m->object->memq, m, listq);
457
458	/*
459	 * And show that the object has one fewer resident page.
460	 */
461
462	m->object->resident_page_count--;
463
464	m->flags &= ~PG_TABLED;
465}
466
467/*
468 *	vm_page_lookup:
469 *
470 *	Returns the page associated with the object/offset
471 *	pair specified; if none is found, NULL is returned.
472 *
473 *	The object must be locked.  No side effects.
474 */
475
476vm_page_t
477vm_page_lookup(object, pindex)
478	register vm_object_t object;
479	register vm_pindex_t pindex;
480{
481	register vm_page_t m;
482	register struct pglist *bucket;
483	int s;
484
485	/*
486	 * Search the hash table for this object/offset pair
487	 */
488
489	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
490
491	s = splvm();
492	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) {
493		if ((m->object == object) && (m->pindex == pindex)) {
494			splx(s);
495			m->object->page_hint = m;
496			return (m);
497		}
498	}
499	splx(s);
500	return (NULL);
501}
502
503/*
504 *	vm_page_rename:
505 *
506 *	Move the given memory entry from its
507 *	current object to the specified target object/offset.
508 *
509 *	The object must be locked.
510 */
511void
512vm_page_rename(m, new_object, new_pindex)
513	register vm_page_t m;
514	register vm_object_t new_object;
515	vm_pindex_t new_pindex;
516{
517	int s;
518
519	s = splvm();
520	vm_page_remove(m);
521	vm_page_insert(m, new_object, new_pindex);
522	splx(s);
523}
524
525/*
526 * vm_page_unqueue without any wakeup
527 */
528void
529vm_page_unqueue_nowakeup(m)
530	vm_page_t m;
531{
532	int queue = m->queue;
533	struct vpgqueues *pq;
534	if (queue != PQ_NONE) {
535		pq = &vm_page_queues[queue];
536		m->queue = PQ_NONE;
537		TAILQ_REMOVE(pq->pl, m, pageq);
538		--(*pq->cnt);
539		--(*pq->lcnt);
540	}
541}
542
543/*
544 * vm_page_unqueue must be called at splhigh();
545 */
546void
547vm_page_unqueue(m)
548	vm_page_t m;
549{
550	int queue = m->queue;
551	struct vpgqueues *pq;
552	if (queue != PQ_NONE) {
553		m->queue = PQ_NONE;
554		pq = &vm_page_queues[queue];
555		TAILQ_REMOVE(pq->pl, m, pageq);
556		--(*pq->cnt);
557		--(*pq->lcnt);
558		if ((queue - m->pc) == PQ_CACHE) {
559			if ((cnt.v_cache_count + cnt.v_free_count) <
560				(cnt.v_free_reserved + cnt.v_cache_min))
561				pagedaemon_wakeup();
562		}
563	}
564}
565
566/*
567 * Find a page on the specified queue with color optimization.
568 */
569vm_page_t
570vm_page_list_find(basequeue, index)
571	int basequeue, index;
572{
573#if PQ_L2_SIZE > 1
574
575	int i,j;
576	vm_page_t m;
577	int hindex;
578
579	for(j = 0; j < PQ_L1_SIZE; j++) {
580		for(i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1);
581			i >= 0;
582			i -= PQ_L1_SIZE) {
583			hindex = (index + (i+j)) & PQ_L2_MASK;
584			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
585			if (m)
586				return m;
587
588			hindex = (index - (i+j)) & PQ_L2_MASK;
589			m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
590			if (m)
591				return m;
592		}
593	}
594	return NULL;
595#else
596	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
597#endif
598
599}
600
601/*
602 * Find a page on the specified queue with color optimization.
603 */
604vm_page_t
605vm_page_select(object, pindex, basequeue)
606	vm_object_t object;
607	vm_pindex_t pindex;
608	int basequeue;
609{
610
611#if PQ_L2_SIZE > 1
612	int index;
613	index = (pindex + object->pg_color) & PQ_L2_MASK;
614	return vm_page_list_find(basequeue, index);
615
616#else
617	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
618#endif
619
620}
621
622/*
623 * Find a free or zero page, with specified preference.
624 */
625static vm_page_t
626vm_page_select_free(object, pindex, prefqueue)
627	vm_object_t object;
628	vm_pindex_t pindex;
629	int prefqueue;
630{
631#if PQ_L2_SIZE > 1
632	int i,j;
633	int index, hindex;
634#endif
635	vm_page_t m;
636	int oqueuediff;
637
638	if (prefqueue == PQ_ZERO)
639		oqueuediff = PQ_FREE - PQ_ZERO;
640	else
641		oqueuediff = PQ_ZERO - PQ_FREE;
642
643	if (object->page_hint) {
644		 if (object->page_hint->pindex == (pindex - 1)) {
645			vm_offset_t last_phys;
646			if ((object->page_hint->flags & PG_FICTITIOUS) == 0) {
647				if ((object->page_hint < &vm_page_array[cnt.v_page_count-1]) &&
648					(object->page_hint >= &vm_page_array[0])) {
649					int queue;
650					last_phys = VM_PAGE_TO_PHYS(object->page_hint);
651					m = PHYS_TO_VM_PAGE(last_phys + PAGE_SIZE);
652					queue = m->queue - m->pc;
653					if (queue == PQ_FREE || queue == PQ_ZERO) {
654						return m;
655					}
656				}
657			}
658		}
659	}
660
661
662#if PQ_L2_SIZE > 1
663
664	index = pindex + object->pg_color;
665	for(j = 0; j < PQ_L1_SIZE; j++) {
666		for(i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1);
667			(i + j) >= 0;
668			i -= PQ_L1_SIZE) {
669
670			hindex = prefqueue + ((index + (i+j)) & PQ_L2_MASK);
671			if (m = TAILQ_FIRST(vm_page_queues[hindex].pl))
672				return m;
673			if (m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl))
674				return m;
675
676			hindex = prefqueue + ((index - (i+j)) & PQ_L2_MASK);
677			if (m = TAILQ_FIRST(vm_page_queues[hindex].pl))
678				return m;
679			if (m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl))
680				return m;
681		}
682	}
683#else
684	if (m = TAILQ_FIRST(vm_page_queues[prefqueue].pl))
685		return m;
686	else
687		return TAILQ_FIRST(vm_page_queues[prefqueue + oqueuediff].pl);
688#endif
689
690	return NULL;
691}
692
693/*
694 *	vm_page_alloc:
695 *
696 *	Allocate and return a memory cell associated
697 *	with this VM object/offset pair.
698 *
699 *	page_req classes:
700 *	VM_ALLOC_NORMAL		normal process request
701 *	VM_ALLOC_SYSTEM		system *really* needs a page
702 *	VM_ALLOC_INTERRUPT	interrupt time request
703 *	VM_ALLOC_ZERO		zero page
704 *
705 *	Object must be locked.
706 */
707vm_page_t
708vm_page_alloc(object, pindex, page_req)
709	vm_object_t object;
710	vm_pindex_t pindex;
711	int page_req;
712{
713	register vm_page_t m;
714	struct vpgqueues *pq;
715	int queue, qtype;
716	int s;
717
718#ifdef DIAGNOSTIC
719	m = vm_page_lookup(object, pindex);
720	if (m)
721		panic("vm_page_alloc: page already allocated");
722#endif
723
724	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
725		page_req = VM_ALLOC_SYSTEM;
726	};
727
728	s = splvm();
729
730	switch (page_req) {
731
732	case VM_ALLOC_NORMAL:
733		if (cnt.v_free_count >= cnt.v_free_reserved) {
734			m = vm_page_select_free(object, pindex, PQ_FREE);
735#if defined(DIAGNOSTIC)
736			if (m == NULL)
737				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
738#endif
739		} else {
740			m = vm_page_select(object, pindex, PQ_CACHE);
741			if (m == NULL) {
742				splx(s);
743#if defined(DIAGNOSTIC)
744				if (cnt.v_cache_count > 0)
745					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
746#endif
747				pagedaemon_wakeup();
748				return (NULL);
749			}
750		}
751		break;
752
753	case VM_ALLOC_ZERO:
754		if (cnt.v_free_count >= cnt.v_free_reserved) {
755			m = vm_page_select_free(object, pindex, PQ_ZERO);
756#if defined(DIAGNOSTIC)
757			if (m == NULL)
758				panic("vm_page_alloc(ZERO): missing page on free queue\n");
759#endif
760		} else {
761			m = vm_page_select(object, pindex, PQ_CACHE);
762			if (m == NULL) {
763				splx(s);
764#if defined(DIAGNOSTIC)
765				if (cnt.v_cache_count > 0)
766					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
767#endif
768				pagedaemon_wakeup();
769				return (NULL);
770			}
771		}
772		break;
773
774	case VM_ALLOC_SYSTEM:
775		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
776		    ((cnt.v_cache_count == 0) &&
777		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
778			m = vm_page_select_free(object, pindex, PQ_FREE);
779#if defined(DIAGNOSTIC)
780			if (m == NULL)
781				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
782#endif
783		} else {
784			m = vm_page_select(object, pindex, PQ_CACHE);
785			if (m == NULL) {
786				splx(s);
787#if defined(DIAGNOSTIC)
788				if (cnt.v_cache_count > 0)
789					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
790#endif
791				pagedaemon_wakeup();
792				return (NULL);
793			}
794		}
795		break;
796
797	case VM_ALLOC_INTERRUPT:
798		if (cnt.v_free_count > 0) {
799			m = vm_page_select_free(object, pindex, PQ_FREE);
800#if defined(DIAGNOSTIC)
801			if (m == NULL)
802				panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
803#endif
804		} else {
805			splx(s);
806			pagedaemon_wakeup();
807			return (NULL);
808		}
809		break;
810
811	default:
812		panic("vm_page_alloc: invalid allocation class");
813	}
814
815	queue = m->queue;
816	qtype = queue - m->pc;
817	if (qtype == PQ_ZERO)
818		--vm_page_zero_count;
819	pq = &vm_page_queues[queue];
820	TAILQ_REMOVE(pq->pl, m, pageq);
821	--(*pq->cnt);
822	--(*pq->lcnt);
823	if (qtype == PQ_ZERO) {
824		m->flags = PG_ZERO|PG_BUSY;
825	} else if (qtype == PQ_CACHE) {
826		vm_page_remove(m);
827		m->flags = PG_BUSY;
828	} else {
829		m->flags = PG_BUSY;
830	}
831	m->wire_count = 0;
832	m->hold_count = 0;
833	m->act_count = 0;
834	m->busy = 0;
835	m->valid = 0;
836	m->dirty = 0;
837	m->queue = PQ_NONE;
838
839	/* XXX before splx until vm_page_insert is safe */
840	vm_page_insert(m, object, pindex);
841
842	splx(s);
843
844	/*
845	 * Don't wakeup too often - wakeup the pageout daemon when
846	 * we would be nearly out of memory.
847	 */
848	if (((cnt.v_free_count + cnt.v_cache_count) <
849		(cnt.v_free_reserved + cnt.v_cache_min)) ||
850			(cnt.v_free_count < cnt.v_pageout_free_min))
851		pagedaemon_wakeup();
852
853	return (m);
854}
855
856void
857vm_wait()
858{
859	int s;
860
861	s = splvm();
862	if (curproc == pageproc) {
863		vm_pageout_pages_needed = 1;
864		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
865	} else {
866		if (!vm_pages_needed) {
867			vm_pages_needed++;
868			wakeup(&vm_pages_needed);
869		}
870		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
871	}
872	splx(s);
873}
874
875
876/*
877 *	vm_page_activate:
878 *
879 *	Put the specified page on the active list (if appropriate).
880 *
881 *	The page queues must be locked.
882 */
883void
884vm_page_activate(m)
885	register vm_page_t m;
886{
887	int s;
888
889	s = splvm();
890	if (m->queue == PQ_ACTIVE)
891		panic("vm_page_activate: already active");
892
893	if ((m->queue - m->pc) == PQ_CACHE)
894		cnt.v_reactivated++;
895
896	vm_page_unqueue(m);
897
898	if (m->wire_count == 0) {
899		m->queue = PQ_ACTIVE;
900		++(*vm_page_queues[PQ_ACTIVE].lcnt);
901		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
902		if (m->act_count < ACT_INIT)
903			m->act_count = ACT_INIT;
904		cnt.v_active_count++;
905	}
906	splx(s);
907}
908
909/*
910 * helper routine for vm_page_free and vm_page_free_zero
911 */
912static int
913vm_page_freechk_and_unqueue(m)
914	vm_page_t m;
915{
916	if (m->busy ||
917		(m->flags & PG_BUSY) ||
918		((m->queue - m->pc) == PQ_FREE) ||
919		(m->hold_count != 0)) {
920		printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
921			m->pindex, m->busy,
922			(m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
923		if ((m->queue - m->pc) == PQ_FREE)
924			panic("vm_page_free: freeing free page");
925		else
926			panic("vm_page_free: freeing busy page");
927	}
928
929	vm_page_remove(m);
930	vm_page_unqueue_nowakeup(m);
931	if ((m->flags & PG_FICTITIOUS) != 0) {
932		return 0;
933	}
934	if (m->wire_count != 0) {
935		if (m->wire_count > 1) {
936			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
937				m->wire_count, m->pindex);
938		}
939		m->wire_count = 0;
940		cnt.v_wire_count--;
941	}
942
943	return 1;
944}
945
946/*
947 * helper routine for vm_page_free and vm_page_free_zero
948 */
949static __inline void
950vm_page_free_wakeup()
951{
952
953/*
954 * if pageout daemon needs pages, then tell it that there are
955 * some free.
956 */
957	if (vm_pageout_pages_needed) {
958		wakeup(&vm_pageout_pages_needed);
959		vm_pageout_pages_needed = 0;
960	}
961	/*
962	 * wakeup processes that are waiting on memory if we hit a
963	 * high water mark. And wakeup scheduler process if we have
964	 * lots of memory. this process will swapin processes.
965	 */
966	if (vm_pages_needed &&
967		((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
968		wakeup(&cnt.v_free_count);
969		vm_pages_needed = 0;
970	}
971}
972
973/*
974 *	vm_page_free:
975 *
976 *	Returns the given page to the free list,
977 *	disassociating it with any VM object.
978 *
979 *	Object and page must be locked prior to entry.
980 */
981void
982vm_page_free(m)
983	register vm_page_t m;
984{
985	int s;
986	struct vpgqueues *pq;
987
988	s = splvm();
989
990	cnt.v_tfree++;
991
992	if (!vm_page_freechk_and_unqueue(m)) {
993		splx(s);
994		return;
995	}
996
997	m->queue = PQ_FREE + m->pc;
998	pq = &vm_page_queues[m->queue];
999	++(*pq->lcnt);
1000	++(*pq->cnt);
1001	/*
1002	 * If the pageout process is grabbing the page, it is likely
1003	 * that the page is NOT in the cache.  It is more likely that
1004	 * the page will be partially in the cache if it is being
1005	 * explicitly freed.
1006	 */
1007	if (curproc == pageproc) {
1008		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
1009	} else {
1010		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
1011	}
1012	vm_page_free_wakeup();
1013	splx(s);
1014}
1015
1016void
1017vm_page_free_zero(m)
1018	register vm_page_t m;
1019{
1020	int s;
1021	struct vpgqueues *pq;
1022
1023	s = splvm();
1024
1025	cnt.v_tfree++;
1026
1027	if (!vm_page_freechk_and_unqueue(m)) {
1028		splx(s);
1029		return;
1030	}
1031
1032	m->queue = PQ_ZERO + m->pc;
1033	pq = &vm_page_queues[m->queue];
1034	++(*pq->lcnt);
1035	++(*pq->cnt);
1036
1037	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
1038	++vm_page_zero_count;
1039	vm_page_free_wakeup();
1040	splx(s);
1041}
1042
1043/*
1044 *	vm_page_wire:
1045 *
1046 *	Mark this page as wired down by yet
1047 *	another map, removing it from paging queues
1048 *	as necessary.
1049 *
1050 *	The page queues must be locked.
1051 */
1052void
1053vm_page_wire(m)
1054	register vm_page_t m;
1055{
1056	int s;
1057
1058	if (m->wire_count == 0) {
1059		s = splvm();
1060		vm_page_unqueue(m);
1061		splx(s);
1062		cnt.v_wire_count++;
1063	}
1064	++(*vm_page_queues[PQ_NONE].lcnt);
1065	m->wire_count++;
1066	m->flags |= PG_MAPPED;
1067}
1068
1069/*
1070 *	vm_page_unwire:
1071 *
1072 *	Release one wiring of this page, potentially
1073 *	enabling it to be paged again.
1074 *
1075 *	The page queues must be locked.
1076 */
1077void
1078vm_page_unwire(m)
1079	register vm_page_t m;
1080{
1081	int s;
1082
1083	s = splvm();
1084
1085	if (m->wire_count > 0)
1086		m->wire_count--;
1087
1088	if (m->wire_count == 0) {
1089		cnt.v_wire_count--;
1090		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1091		m->queue = PQ_ACTIVE;
1092		++(*vm_page_queues[PQ_ACTIVE].lcnt);
1093		cnt.v_active_count++;
1094	}
1095	splx(s);
1096}
1097
1098
1099/*
1100 *	vm_page_deactivate:
1101 *
1102 *	Returns the given page to the inactive list,
1103 *	indicating that no physical maps have access
1104 *	to this page.  [Used by the physical mapping system.]
1105 *
1106 *	The page queues must be locked.
1107 */
1108void
1109vm_page_deactivate(m)
1110	register vm_page_t m;
1111{
1112	int s;
1113
1114	/*
1115	 * Only move active pages -- ignore locked or already inactive ones.
1116	 *
1117	 * XXX: sometimes we get pages which aren't wired down or on any queue -
1118	 * we need to put them on the inactive queue also, otherwise we lose
1119	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
1120	 */
1121	if (m->queue == PQ_INACTIVE)
1122		return;
1123
1124	s = splvm();
1125	if (m->wire_count == 0 && m->hold_count == 0) {
1126		if ((m->queue - m->pc) == PQ_CACHE)
1127			cnt.v_reactivated++;
1128		vm_page_unqueue(m);
1129		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
1130		m->queue = PQ_INACTIVE;
1131		++(*vm_page_queues[PQ_INACTIVE].lcnt);
1132		cnt.v_inactive_count++;
1133	}
1134	splx(s);
1135}
1136
1137/*
1138 * vm_page_cache
1139 *
1140 * Put the specified page onto the page cache queue (if appropriate).
1141 */
1142void
1143vm_page_cache(m)
1144	register vm_page_t m;
1145{
1146	int s;
1147
1148	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
1149		printf("vm_page_cache: attempting to cache busy page\n");
1150		return;
1151	}
1152	if ((m->queue - m->pc) == PQ_CACHE)
1153		return;
1154
1155	vm_page_protect(m, VM_PROT_NONE);
1156	if (m->dirty != 0) {
1157		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
1158	}
1159	s = splvm();
1160	vm_page_unqueue_nowakeup(m);
1161	m->queue = PQ_CACHE + m->pc;
1162	++(*vm_page_queues[m->queue].lcnt);
1163	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
1164	cnt.v_cache_count++;
1165	vm_page_free_wakeup();
1166	splx(s);
1167}
1168
1169
1170/*
1171 * mapping function for valid bits or for dirty bits in
1172 * a page
1173 */
1174inline int
1175vm_page_bits(int base, int size)
1176{
1177	u_short chunk;
1178
1179	if ((base == 0) && (size >= PAGE_SIZE))
1180		return VM_PAGE_BITS_ALL;
1181	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1182	base = (base % PAGE_SIZE) / DEV_BSIZE;
1183	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
1184	return (chunk << base) & VM_PAGE_BITS_ALL;
1185}
1186
1187/*
1188 * set a page valid and clean
1189 */
1190void
1191vm_page_set_validclean(m, base, size)
1192	vm_page_t m;
1193	int base;
1194	int size;
1195{
1196	int pagebits = vm_page_bits(base, size);
1197	m->valid |= pagebits;
1198	m->dirty &= ~pagebits;
1199	if( base == 0 && size == PAGE_SIZE)
1200		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1201}
1202
1203/*
1204 * set a page (partially) invalid
1205 */
1206void
1207vm_page_set_invalid(m, base, size)
1208	vm_page_t m;
1209	int base;
1210	int size;
1211{
1212	int bits;
1213
1214	m->valid &= ~(bits = vm_page_bits(base, size));
1215	if (m->valid == 0)
1216		m->dirty &= ~bits;
1217}
1218
1219/*
1220 * is (partial) page valid?
1221 */
1222int
1223vm_page_is_valid(m, base, size)
1224	vm_page_t m;
1225	int base;
1226	int size;
1227{
1228	int bits = vm_page_bits(base, size);
1229
1230	if (m->valid && ((m->valid & bits) == bits))
1231		return 1;
1232	else
1233		return 0;
1234}
1235
1236void
1237vm_page_test_dirty(m)
1238	vm_page_t m;
1239{
1240	if ((m->dirty != VM_PAGE_BITS_ALL) &&
1241	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1242		m->dirty = VM_PAGE_BITS_ALL;
1243	}
1244}
1245
1246/*
1247 * This interface is for merging with malloc() someday.
1248 * Even if we never implement compaction so that contiguous allocation
1249 * works after initialization time, malloc()'s data structures are good
1250 * for statistics and for allocations of less than a page.
1251 */
1252void *
1253contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
1254	unsigned long size;	/* should be size_t here and for malloc() */
1255	int type;
1256	int flags;
1257	unsigned long low;
1258	unsigned long high;
1259	unsigned long alignment;
1260	unsigned long boundary;
1261	vm_map_t map;
1262{
1263	int i, s, start;
1264	vm_offset_t addr, phys, tmp_addr;
1265	int pass;
1266	vm_page_t pga = vm_page_array;
1267
1268	size = round_page(size);
1269	if (size == 0)
1270		panic("vm_page_alloc_contig: size must not be 0");
1271	if ((alignment & (alignment - 1)) != 0)
1272		panic("vm_page_alloc_contig: alignment must be a power of 2");
1273	if ((boundary & (boundary - 1)) != 0)
1274		panic("vm_page_alloc_contig: boundary must be a power of 2");
1275
1276	start = 0;
1277	for (pass = 0; pass <= 1; pass++) {
1278		s = splvm();
1279again:
1280		/*
1281		 * Find first page in array that is free, within range, aligned, and
1282		 * such that the boundary won't be crossed.
1283		 */
1284		for (i = start; i < cnt.v_page_count; i++) {
1285			int pqtype;
1286			phys = VM_PAGE_TO_PHYS(&pga[i]);
1287			pqtype = pga[i].queue - pga[i].pc;
1288			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
1289			    (phys >= low) && (phys < high) &&
1290			    ((phys & (alignment - 1)) == 0) &&
1291			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
1292				break;
1293		}
1294
1295		/*
1296		 * If the above failed or we will exceed the upper bound, fail.
1297		 */
1298		if ((i == cnt.v_page_count) ||
1299			((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
1300			vm_page_t m, next;
1301
1302again1:
1303			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
1304				m != NULL;
1305				m = next) {
1306
1307				if (m->queue != PQ_INACTIVE) {
1308					break;
1309				}
1310
1311				next = TAILQ_NEXT(m, pageq);
1312				if (m->flags & PG_BUSY) {
1313					m->flags |= PG_WANTED;
1314					tsleep(m, PVM, "vpctw0", 0);
1315					goto again1;
1316				}
1317				vm_page_test_dirty(m);
1318				if (m->dirty) {
1319					if (m->object->type == OBJT_VNODE) {
1320						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
1321						goto again1;
1322					} else if (m->object->type == OBJT_SWAP ||
1323								m->object->type == OBJT_DEFAULT) {
1324						vm_page_protect(m, VM_PROT_NONE);
1325						vm_pageout_flush(&m, 1, 0);
1326						goto again1;
1327					}
1328				}
1329				if ((m->dirty == 0) &&
1330					(m->busy == 0) &&
1331					(m->hold_count == 0))
1332					vm_page_cache(m);
1333			}
1334
1335			for (m = TAILQ_FIRST(&vm_page_queue_active);
1336				m != NULL;
1337				m = next) {
1338
1339				if (m->queue != PQ_ACTIVE) {
1340					break;
1341				}
1342
1343				next = TAILQ_NEXT(m, pageq);
1344				if (m->flags & PG_BUSY) {
1345					m->flags |= PG_WANTED;
1346					tsleep(m, PVM, "vpctw1", 0);
1347					goto again1;
1348				}
1349				vm_page_test_dirty(m);
1350				if (m->dirty) {
1351					if (m->object->type == OBJT_VNODE) {
1352						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
1353						goto again1;
1354					} else if (m->object->type == OBJT_SWAP ||
1355								m->object->type == OBJT_DEFAULT) {
1356						vm_page_protect(m, VM_PROT_NONE);
1357						vm_pageout_flush(&m, 1, 0);
1358						goto again1;
1359					}
1360				}
1361				if ((m->dirty == 0) &&
1362					(m->busy == 0) &&
1363					(m->hold_count == 0))
1364					vm_page_cache(m);
1365			}
1366
1367			splx(s);
1368			continue;
1369		}
1370		start = i;
1371
1372		/*
1373		 * Check successive pages for contiguous and free.
1374		 */
1375		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
1376			int pqtype;
1377			pqtype = pga[i].queue - pga[i].pc;
1378			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
1379			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
1380			    ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
1381				start++;
1382				goto again;
1383			}
1384		}
1385
1386		for (i = start; i < (start + size / PAGE_SIZE); i++) {
1387			int pqtype;
1388			vm_page_t m = &pga[i];
1389
1390			pqtype = m->queue - m->pc;
1391			if (pqtype == PQ_CACHE)
1392				vm_page_free(m);
1393
1394			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
1395			--(*vm_page_queues[m->queue].lcnt);
1396			cnt.v_free_count--;
1397			m->valid = VM_PAGE_BITS_ALL;
1398			m->flags = 0;
1399			m->dirty = 0;
1400			m->wire_count = 0;
1401			m->busy = 0;
1402			m->queue = PQ_NONE;
1403			m->object = NULL;
1404			vm_page_wire(m);
1405		}
1406
1407		/*
1408		 * We've found a contiguous chunk that meets are requirements.
1409		 * Allocate kernel VM, unfree and assign the physical pages to it and
1410		 * return kernel VM pointer.
1411		 */
1412		tmp_addr = addr = kmem_alloc_pageable(map, size);
1413		if (addr == 0) {
1414			/*
1415			 * XXX We almost never run out of kernel virtual
1416			 * space, so we don't make the allocated memory
1417			 * above available.
1418			 */
1419			splx(s);
1420			return (NULL);
1421		}
1422
1423		for (i = start; i < (start + size / PAGE_SIZE); i++) {
1424			vm_page_t m = &pga[i];
1425			vm_page_insert(m, kernel_object,
1426				OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
1427			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
1428			tmp_addr += PAGE_SIZE;
1429		}
1430
1431		splx(s);
1432		return ((void *)addr);
1433	}
1434	return NULL;
1435}
1436
1437void *
1438contigmalloc(size, type, flags, low, high, alignment, boundary)
1439	unsigned long size;	/* should be size_t here and for malloc() */
1440	int type;
1441	int flags;
1442	unsigned long low;
1443	unsigned long high;
1444	unsigned long alignment;
1445	unsigned long boundary;
1446{
1447	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
1448			     kernel_map);
1449}
1450
1451vm_offset_t
1452vm_page_alloc_contig(size, low, high, alignment)
1453	vm_offset_t size;
1454	vm_offset_t low;
1455	vm_offset_t high;
1456	vm_offset_t alignment;
1457{
1458	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
1459					  alignment, 0ul, kernel_map));
1460}
1461
1462#include "opt_ddb.h"
1463#ifdef DDB
1464#include <sys/kernel.h>
1465
1466#include <ddb/ddb.h>
1467
1468DB_SHOW_COMMAND(page, vm_page_print_page_info)
1469{
1470	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1471	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1472	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1473	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1474	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1475	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1476	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1477	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1478	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1479	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1480}
1481
1482DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1483{
1484	int i;
1485	db_printf("PQ_FREE:");
1486	for(i=0;i<PQ_L2_SIZE;i++) {
1487		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
1488	}
1489	db_printf("\n");
1490
1491	db_printf("PQ_CACHE:");
1492	for(i=0;i<PQ_L2_SIZE;i++) {
1493		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
1494	}
1495	db_printf("\n");
1496
1497	db_printf("PQ_ZERO:");
1498	for(i=0;i<PQ_L2_SIZE;i++) {
1499		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
1500	}
1501	db_printf("\n");
1502
1503	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1504		*vm_page_queues[PQ_ACTIVE].lcnt,
1505		*vm_page_queues[PQ_INACTIVE].lcnt);
1506}
1507#endif /* DDB */
1508