vm_page.c revision 30989
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37 *	$Id: vm_page.c,v 1.82 1997/10/10 18:18:47 phk Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 *	Resident memory management module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/malloc.h>
74#include <sys/proc.h>
75#include <sys/vmmeter.h>
76
77#include <vm/vm.h>
78#include <vm/vm_param.h>
79#include <vm/vm_prot.h>
80#include <sys/lock.h>
81#include <vm/vm_kern.h>
82#include <vm/vm_object.h>
83#include <vm/vm_page.h>
84#include <vm/vm_pageout.h>
85#include <vm/vm_extern.h>
86
87static void	vm_page_queue_init __P((void));
88static vm_page_t vm_page_select_free __P((vm_object_t object,
89			vm_pindex_t pindex, int prefqueue));
90
91/*
92 *	Associated with each page of user-allocatable memory is a
93 *	page structure.
94 */
95
96static struct pglist *vm_page_buckets;	/* Array of buckets */
97static int vm_page_bucket_count;	/* How big is array? */
98static int vm_page_hash_mask;		/* Mask for hash function */
99
100struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
101struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
102struct pglist vm_page_queue_active = {0};
103struct pglist vm_page_queue_inactive = {0};
104struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {0};
105
106int no_queue=0;
107
108struct vpgqueues vm_page_queues[PQ_COUNT] = {0};
109int pqcnt[PQ_COUNT] = {0};
110
111static void
112vm_page_queue_init(void) {
113	int i;
114
115	vm_page_queues[PQ_NONE].pl = NULL;
116	vm_page_queues[PQ_NONE].cnt = &no_queue;
117	for (i = 0; i < PQ_L2_SIZE; i++) {
118		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
119		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
120	}
121	for (i = 0; i < PQ_L2_SIZE; i++) {
122		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
123		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
124	}
125	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
126	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
127
128	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
129	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
130	for (i = 0; i < PQ_L2_SIZE; i++) {
131		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
132		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
133	}
134	for (i = 0; i < PQ_COUNT; i++) {
135		if (vm_page_queues[i].pl) {
136			TAILQ_INIT(vm_page_queues[i].pl);
137		} else if (i != 0) {
138			panic("vm_page_queue_init: queue %d is null", i);
139		}
140		vm_page_queues[i].lcnt = &pqcnt[i];
141	}
142}
143
144vm_page_t vm_page_array = 0;
145int vm_page_array_size = 0;
146long first_page = 0;
147static long last_page;
148static vm_size_t page_mask;
149static int page_shift;
150int vm_page_zero_count = 0;
151
152/*
153 * map of contiguous valid DEV_BSIZE chunks in a page
154 * (this list is valid for page sizes up to 16*DEV_BSIZE)
155 */
156static u_short vm_page_dev_bsize_chunks[] = {
157	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
158	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
159};
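/*
 * Each entry k of the table above is simply a mask with its low k bits
 * set (entry 4 is 0xf, covering four DEV_BSIZE chunks); vm_page_bits()
 * shifts one of these masks to the starting chunk of a request.
 */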
160
161static inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
162static int vm_page_freechk_and_unqueue __P((vm_page_t m));
163static void vm_page_free_wakeup __P((void));
164
165/*
166 *	vm_set_page_size:
167 *
168 *	Sets the page size, perhaps based upon the memory
169 *	size.  Must be called before any use of page-size
170 *	dependent functions.
171 *
172 *	Sets page_shift and page_mask from cnt.v_page_size.
173 */
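/*
 * With the usual 4096-byte page this leaves page_mask = 0xfff and
 * page_shift = 12.
 */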
174void
175vm_set_page_size()
176{
177
178	if (cnt.v_page_size == 0)
179		cnt.v_page_size = DEFAULT_PAGE_SIZE;
180	page_mask = cnt.v_page_size - 1;
181	if ((page_mask & cnt.v_page_size) != 0)
182		panic("vm_set_page_size: page size not a power of two");
183	for (page_shift = 0;; page_shift++)
184		if ((1 << page_shift) == cnt.v_page_size)
185			break;
186}
187
188/*
189 *	vm_page_startup:
190 *
191 *	Initializes the resident memory module.
192 *
193 *	Allocates memory for the page cells, and
194 *	for the object/offset-to-page hash table headers.
195 *	Each page cell is initialized and placed on the free list.
196 */
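/*
 * In outline: the hash buckets and the vm_page structures themselves are
 * carved out of the start of the largest phys_avail segment and mapped at
 * the supplied virtual address; whatever physical memory remains is
 * entered on the free queues.
 */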
197
198vm_offset_t
199vm_page_startup(starta, enda, vaddr)
200	register vm_offset_t starta;
201	vm_offset_t enda;
202	register vm_offset_t vaddr;
203{
204	register vm_offset_t mapped;
205	register vm_page_t m;
206	register struct pglist *bucket;
207	vm_size_t npages, page_range;
208	register vm_offset_t new_start;
209	int i;
210	vm_offset_t pa;
211	int nblocks;
212	vm_offset_t first_managed_page;
213
214	/* the biggest memory array is the second group of pages */
215	vm_offset_t start;
216	vm_offset_t biggestone, biggestsize;
217
218	vm_offset_t total;
219
220	total = 0;
221	biggestsize = 0;
222	biggestone = 0;
223	nblocks = 0;
224	vaddr = round_page(vaddr);
225
226	for (i = 0; phys_avail[i + 1]; i += 2) {
227		phys_avail[i] = round_page(phys_avail[i]);
228		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
229	}
230
231	for (i = 0; phys_avail[i + 1]; i += 2) {
232		int size = phys_avail[i + 1] - phys_avail[i];
233
234		if (size > biggestsize) {
235			biggestone = i;
236			biggestsize = size;
237		}
238		++nblocks;
239		total += size;
240	}
241
242	start = phys_avail[biggestone];
243
244	/*
245	 * Initialize the queue headers for the free queue, the active queue
246	 * and the inactive queue.
247	 */
248
249	vm_page_queue_init();
250
251	/*
252	 * Allocate (and initialize) the hash table buckets.
253	 *
254	 * The number of buckets MUST BE a power of 2, and the actual value is
255	 * the next power of 2 greater than the number of physical pages in
256	 * the system.
257	 *
258	 * Note: This computation can be tweaked if desired.
259	 */
260	vm_page_buckets = (struct pglist *) vaddr;
261	bucket = vm_page_buckets;
262	if (vm_page_bucket_count == 0) {
263		vm_page_bucket_count = 1;
264		while (vm_page_bucket_count < atop(total))
265			vm_page_bucket_count <<= 1;
266	}
267	vm_page_hash_mask = vm_page_bucket_count - 1;
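	/*
	 * For example, about 32MB of managed memory (roughly 8000 4K pages)
	 * results in 8192 buckets and a hash mask of 0x1fff.
	 */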
268
269	/*
270	 * Validate these addresses.
271	 */
272
273	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
274	new_start = round_page(new_start);
275	mapped = vaddr;
276	vaddr = pmap_map(mapped, start, new_start,
277	    VM_PROT_READ | VM_PROT_WRITE);
278	start = new_start;
279	bzero((caddr_t) mapped, vaddr - mapped);
280	mapped = vaddr;
281
282	for (i = 0; i < vm_page_bucket_count; i++) {
283		TAILQ_INIT(bucket);
284		bucket++;
285	}
286
287	/*
288	 * Validate these zone addresses.
289	 */
290
291	new_start = start + (vaddr - mapped);
292	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
293	bzero((caddr_t) mapped, (vaddr - mapped));
294	start = round_page(new_start);
295
296	/*
297	 * Compute the number of pages of memory that will be available for
298	 * use (taking into account the overhead of a page structure per
299	 * page).
300	 */
301
302	first_page = phys_avail[0] / PAGE_SIZE;
303	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
304
305	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
306	npages = (total - (page_range * sizeof(struct vm_page)) -
307	    (start - phys_avail[biggestone])) / PAGE_SIZE;
308
309	/*
310	 * Initialize the mem entry structures now, and put them in the free
311	 * queue.
312	 */
313
314	vm_page_array = (vm_page_t) vaddr;
315	mapped = vaddr;
316
317	/*
318	 * Validate these addresses.
319	 */
320
321	new_start = round_page(start + page_range * sizeof(struct vm_page));
322	mapped = pmap_map(mapped, start, new_start,
323	    VM_PROT_READ | VM_PROT_WRITE);
324	start = new_start;
325
326	first_managed_page = start / PAGE_SIZE;
327
328	/*
329	 * Clear all of the page structures
330	 */
331	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
332	vm_page_array_size = page_range;
333
334	cnt.v_page_count = 0;
335	cnt.v_free_count = 0;
336	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
337		if (i == biggestone)
338			pa = ptoa(first_managed_page);
339		else
340			pa = phys_avail[i];
341		while (pa < phys_avail[i + 1] && npages-- > 0) {
342			++cnt.v_page_count;
343			++cnt.v_free_count;
344			m = PHYS_TO_VM_PAGE(pa);
345			m->phys_addr = pa;
346			m->flags = 0;
347			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
348			m->queue = PQ_FREE + m->pc;
349			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
350			++(*vm_page_queues[m->queue].lcnt);
351			pa += PAGE_SIZE;
352		}
353	}
354
355	return (mapped);
356}
357
358/*
359 *	vm_page_hash:
360 *
361 *	Distributes the object/offset key pair among hash buckets.
362 *
363 *	NOTE:  This routine depends on vm_page_bucket_count being a power of 2.
364 */
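/*
 * Purely illustrative: with a hypothetical object pointer of 0x1000,
 * pindex 6 and 1024 buckets, the hash is ((0x1000 >> 5) + (6 >> 1)) & 1023,
 * i.e. (128 + 3) & 1023 = 131.
 */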
365static inline int
366vm_page_hash(object, pindex)
367	vm_object_t object;
368	vm_pindex_t pindex;
369{
370	return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
371}
372
373/*
374 *	vm_page_insert:		[ internal use only ]
375 *
376 *	Inserts the given mem entry into the object/object-page
377 *	table and object list.
378 *
379 *	The object and page must be locked, and the caller must be at splhigh.
380 */
381
382void
383vm_page_insert(m, object, pindex)
384	register vm_page_t m;
385	register vm_object_t object;
386	register vm_pindex_t pindex;
387{
388	register struct pglist *bucket;
389
390	if (m->flags & PG_TABLED)
391		panic("vm_page_insert: already inserted");
392
393	/*
394	 * Record the object/offset pair in this page
395	 */
396
397	m->object = object;
398	m->pindex = pindex;
399
400	/*
401	 * Insert it into the object/offset hash table
402	 */
403
404	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
405	TAILQ_INSERT_TAIL(bucket, m, hashq);
406
407	/*
408	 * Now link into the object's list of backed pages.
409	 */
410
411	TAILQ_INSERT_TAIL(&object->memq, m, listq);
412	m->flags |= PG_TABLED;
413	m->object->page_hint = m;
414
415	/*
416	 * And show that the object has one more resident page.
417	 */
418
419	object->resident_page_count++;
420}
421
422/*
423 *	vm_page_remove:		[ internal use only ]
424 *				NOTE: used by device pager as well -wfj
425 *
426 *	Removes the given mem entry from the object/offset-page
427 *	table and the object page list.
428 *
429 *	The object and page must be locked, and at splhigh.
430 */
431
432void
433vm_page_remove(m)
434	register vm_page_t m;
435{
436	register struct pglist *bucket;
437
438	if (!(m->flags & PG_TABLED))
439		return;
440
441	if (m->object->page_hint == m)
442		m->object->page_hint = NULL;
443
444	/*
445	 * Remove from the object/offset hash table
446	 */
447
448	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
449	TAILQ_REMOVE(bucket, m, hashq);
450
451	/*
452	 * Now remove from the object's list of backed pages.
453	 */
454
455	TAILQ_REMOVE(&m->object->memq, m, listq);
456
457	/*
458	 * And show that the object has one fewer resident page.
459	 */
460
461	m->object->resident_page_count--;
462
463	m->flags &= ~PG_TABLED;
464}
465
466/*
467 *	vm_page_lookup:
468 *
469 *	Returns the page associated with the object/offset
470 *	pair specified; if none is found, NULL is returned.
471 *
472 *	The object must be locked.  No side effects.
473 */
474
475vm_page_t
476vm_page_lookup(object, pindex)
477	register vm_object_t object;
478	register vm_pindex_t pindex;
479{
480	register vm_page_t m;
481	register struct pglist *bucket;
482	int s;
483
484	/*
485	 * Search the hash table for this object/offset pair
486	 */
487
488	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
489
490	s = splvm();
491	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) {
492		if ((m->object == object) && (m->pindex == pindex)) {
493			splx(s);
494			m->object->page_hint = m;
495			return (m);
496		}
497	}
498	splx(s);
499	return (NULL);
500}
501
502/*
503 *	vm_page_rename:
504 *
505 *	Move the given memory entry from its
506 *	current object to the specified target object/offset.
507 *
508 *	The object must be locked.
509 */
510void
511vm_page_rename(m, new_object, new_pindex)
512	register vm_page_t m;
513	register vm_object_t new_object;
514	vm_pindex_t new_pindex;
515{
516	int s;
517
518	s = splvm();
519	vm_page_remove(m);
520	vm_page_insert(m, new_object, new_pindex);
521	splx(s);
522}
523
524/*
525 * vm_page_unqueue without any wakeup
526 */
527void
528vm_page_unqueue_nowakeup(m)
529	vm_page_t m;
530{
531	int queue = m->queue;
532	struct vpgqueues *pq;
533	if (queue != PQ_NONE) {
534		pq = &vm_page_queues[queue];
535		m->queue = PQ_NONE;
536		TAILQ_REMOVE(pq->pl, m, pageq);
537		--(*pq->cnt);
538		--(*pq->lcnt);
539	}
540}
541
542/*
543 * vm_page_unqueue must be called at splhigh().
544 */
545void
546vm_page_unqueue(m)
547	vm_page_t m;
548{
549	int queue = m->queue;
550	struct vpgqueues *pq;
551	if (queue != PQ_NONE) {
552		m->queue = PQ_NONE;
553		pq = &vm_page_queues[queue];
554		TAILQ_REMOVE(pq->pl, m, pageq);
555		--(*pq->cnt);
556		--(*pq->lcnt);
557		if ((queue - m->pc) == PQ_CACHE) {
558			if ((cnt.v_cache_count + cnt.v_free_count) <
559				(cnt.v_free_reserved + cnt.v_cache_min))
560				pagedaemon_wakeup();
561		}
562	}
563}
564
565/*
566 * Find a page on the specified queue with color optimization.
567 */
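/*
 * Roughly, the search in vm_page_list_find() tries the exact color first,
 * then fans out to colors on both sides of the requested one (wrapping
 * modulo PQ_L2_SIZE), preferring offsets that are multiples of PQ_L1_SIZE
 * (which preserve the L1 cache color) before those that are not, and
 * finally falls back to the color farthest from the requested one.
 */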
568vm_page_t
569vm_page_list_find(basequeue, index)
570	int basequeue, index;
571{
572#if PQ_L2_SIZE > 1
573
574	int i,j;
575	vm_page_t m;
576	int hindex;
577	struct vpgqueues *pq;
578
579	pq = &vm_page_queues[basequeue];
580
581	m = TAILQ_FIRST(pq[index].pl);
582	if (m)
583		return m;
584
585	for(j = 0; j < PQ_L1_SIZE; j++) {
586		int ij;
587		for(i = (PQ_L2_SIZE / 2) - PQ_L1_SIZE;
588			(ij = i + j) > 0;
589			i -= PQ_L1_SIZE) {
590
591			hindex = index + ij;
592			if (hindex >= PQ_L2_SIZE)
593				hindex -= PQ_L2_SIZE;
594			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
595				return m;
596
597			hindex = index - ij;
598			if (hindex < 0)
599				hindex += PQ_L2_SIZE;
600			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
601				return m;
602		}
603	}
604
605	hindex = index + PQ_L2_SIZE / 2;
606	if (hindex >= PQ_L2_SIZE)
607		hindex -= PQ_L2_SIZE;
608	m = TAILQ_FIRST(pq[hindex].pl);
609	if (m)
610		return m;
611
612	return NULL;
613#else
614	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
615#endif
616
617}
618
619/*
620 * Find a page on the specified queue with color optimization.
621 */
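/*
 * The color index is (pindex + object->pg_color) & PQ_L2_MASK.  With, say,
 * PQ_L2_SIZE of 16 (mask 0xf), an object color of 3 and pindex 10, pages
 * are preferred from queue color 13.  PQ_L2_SIZE is configuration
 * dependent, so these numbers are purely illustrative.
 */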
622vm_page_t
623vm_page_select(object, pindex, basequeue)
624	vm_object_t object;
625	vm_pindex_t pindex;
626	int basequeue;
627{
628
629#if PQ_L2_SIZE > 1
630	int index;
631	index = (pindex + object->pg_color) & PQ_L2_MASK;
632	return vm_page_list_find(basequeue, index);
633
634#else
635	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
636#endif
637
638}
639
640/*
641 * Find a free or zero page, with specified preference.
642 */
643static vm_page_t
644vm_page_select_free(object, pindex, prefqueue)
645	vm_object_t object;
646	vm_pindex_t pindex;
647	int prefqueue;
648{
649#if PQ_L2_SIZE > 1
650	int i,j;
651	int index, hindex;
652#endif
653	vm_page_t m, mh;
654	int oqueuediff;
655	struct vpgqueues *pq;
656
657	if (prefqueue == PQ_ZERO)
658		oqueuediff = PQ_FREE - PQ_ZERO;
659	else
660		oqueuediff = PQ_ZERO - PQ_FREE;
661
662	if ((mh = object->page_hint) != NULL) {
663		if (mh->pindex == (pindex - 1)) {
664			if ((mh->flags & PG_FICTITIOUS) == 0) {
665				if ((mh < &vm_page_array[cnt.v_page_count-1]) &&
666					(mh >= &vm_page_array[0])) {
667					int queue;
668					m = mh + 1;
669					if (VM_PAGE_TO_PHYS(m) == (VM_PAGE_TO_PHYS(mh) + PAGE_SIZE)) {
670						queue = m->queue - m->pc;
671						if (queue == PQ_FREE || queue == PQ_ZERO) {
672							return m;
673						}
674					}
675				}
676			}
677		}
678	}
679
680	pq = &vm_page_queues[prefqueue];
681
682#if PQ_L2_SIZE > 1
683
684	index = (pindex + object->pg_color) & PQ_L2_MASK;
685
686	if ((m = TAILQ_FIRST(pq[index].pl)) != NULL)
687		return m;
688	if ((m = TAILQ_FIRST(pq[index + oqueuediff].pl)) != NULL)
689		return m;
690
691	for(j = 0; j < PQ_L1_SIZE; j++) {
692		int ij;
693		for(i = (PQ_L2_SIZE / 2) - PQ_L1_SIZE;
694			(ij = i + j) >= 0;
695			i -= PQ_L1_SIZE) {
696
697			hindex = index + ij;
698			if (hindex >= PQ_L2_SIZE)
699				hindex -= PQ_L2_SIZE;
700			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
701				return m;
702			if ((m = TAILQ_FIRST(pq[hindex + oqueuediff].pl)) != NULL)
703				return m;
704
705			hindex = index - ij;
706			if (hindex < 0)
707				hindex += PQ_L2_SIZE;
708			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
709				return m;
710			if ((m = TAILQ_FIRST(pq[hindex + oqueuediff].pl)) != NULL)
711				return m;
712		}
713	}
714
715	hindex = index + PQ_L2_SIZE / 2;
716	if (hindex >= PQ_L2_SIZE)
717		hindex -= PQ_L2_SIZE;
718	if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
719		return m;
720	if ((m = TAILQ_FIRST(pq[hindex + oqueuediff].pl)) != NULL)
721		return m;
722
723#else
724	if ((m = TAILQ_FIRST(pq[0].pl)) != NULL)
725		return m;
726	else
727		return TAILQ_FIRST(pq[oqueuediff].pl);
728#endif
729
730	return NULL;
731}
732
733/*
734 *	vm_page_alloc:
735 *
736 *	Allocate and return a memory cell associated
737 *	with this VM object/offset pair.
738 *
739 *	page_req classes:
740 *	VM_ALLOC_NORMAL		normal process request
741 *	VM_ALLOC_SYSTEM		system *really* needs a page
742 *	VM_ALLOC_INTERRUPT	interrupt time request
743 *	VM_ALLOC_ZERO		zero page
744 *
745 *	Object must be locked.
746 */
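/*
 * A typical calling pattern for a pageable allocation (an illustrative
 * sketch only, not lifted from any particular caller):
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait();
 *
 * vm_wait() sleeps until the pageout daemon has made pages available.
 */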
747vm_page_t
748vm_page_alloc(object, pindex, page_req)
749	vm_object_t object;
750	vm_pindex_t pindex;
751	int page_req;
752{
753	register vm_page_t m;
754	struct vpgqueues *pq;
755	int queue, qtype;
756	int s;
757
758#ifdef DIAGNOSTIC
759	m = vm_page_lookup(object, pindex);
760	if (m)
761		panic("vm_page_alloc: page already allocated");
762#endif
763
764	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
765		page_req = VM_ALLOC_SYSTEM;
766	}
767
768	s = splvm();
769
770	switch (page_req) {
771
772	case VM_ALLOC_NORMAL:
773		if (cnt.v_free_count >= cnt.v_free_reserved) {
774			m = vm_page_select_free(object, pindex, PQ_FREE);
775#if defined(DIAGNOSTIC)
776			if (m == NULL)
777				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
778#endif
779		} else {
780			m = vm_page_select(object, pindex, PQ_CACHE);
781			if (m == NULL) {
782				splx(s);
783#if defined(DIAGNOSTIC)
784				if (cnt.v_cache_count > 0)
785					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
786#endif
787				pagedaemon_wakeup();
788				return (NULL);
789			}
790		}
791		break;
792
793	case VM_ALLOC_ZERO:
794		if (cnt.v_free_count >= cnt.v_free_reserved) {
795			m = vm_page_select_free(object, pindex, PQ_ZERO);
796#if defined(DIAGNOSTIC)
797			if (m == NULL)
798				panic("vm_page_alloc(ZERO): missing page on free queue\n");
799#endif
800		} else {
801			m = vm_page_select(object, pindex, PQ_CACHE);
802			if (m == NULL) {
803				splx(s);
804#if defined(DIAGNOSTIC)
805				if (cnt.v_cache_count > 0)
806					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
807#endif
808				pagedaemon_wakeup();
809				return (NULL);
810			}
811		}
812		break;
813
814	case VM_ALLOC_SYSTEM:
815		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
816		    ((cnt.v_cache_count == 0) &&
817		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
818			m = vm_page_select_free(object, pindex, PQ_FREE);
819#if defined(DIAGNOSTIC)
820			if (m == NULL)
821				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
822#endif
823		} else {
824			m = vm_page_select(object, pindex, PQ_CACHE);
825			if (m == NULL) {
826				splx(s);
827#if defined(DIAGNOSTIC)
828				if (cnt.v_cache_count > 0)
829					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
830#endif
831				pagedaemon_wakeup();
832				return (NULL);
833			}
834		}
835		break;
836
837	case VM_ALLOC_INTERRUPT:
838		if (cnt.v_free_count > 0) {
839			m = vm_page_select_free(object, pindex, PQ_FREE);
840#if defined(DIAGNOSTIC)
841			if (m == NULL)
842				panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
843#endif
844		} else {
845			splx(s);
846			pagedaemon_wakeup();
847			return (NULL);
848		}
849		break;
850
851	default:
852		panic("vm_page_alloc: invalid allocation class");
853	}
854
855	queue = m->queue;
856	qtype = queue - m->pc;
857	if (qtype == PQ_ZERO)
858		--vm_page_zero_count;
859	pq = &vm_page_queues[queue];
860	TAILQ_REMOVE(pq->pl, m, pageq);
861	--(*pq->cnt);
862	--(*pq->lcnt);
863	if (qtype == PQ_ZERO) {
864		m->flags = PG_ZERO|PG_BUSY;
865	} else if (qtype == PQ_CACHE) {
866		vm_page_remove(m);
867		m->flags = PG_BUSY;
868	} else {
869		m->flags = PG_BUSY;
870	}
871	m->wire_count = 0;
872	m->hold_count = 0;
873	m->act_count = 0;
874	m->busy = 0;
875	m->valid = 0;
876	m->dirty = 0;
877	m->queue = PQ_NONE;
878
879	/* XXX before splx until vm_page_insert is safe */
880	vm_page_insert(m, object, pindex);
881
882	splx(s);
883
884	/*
885	 * Don't wake up the pageout daemon too often - only when we
886	 * would be nearly out of memory.
887	 */
888	if (((cnt.v_free_count + cnt.v_cache_count) <
889		(cnt.v_free_reserved + cnt.v_cache_min)) ||
890			(cnt.v_free_count < cnt.v_pageout_free_min))
891		pagedaemon_wakeup();
892
893	return (m);
894}
895
896void
897vm_wait()
898{
899	int s;
900
901	s = splvm();
902	if (curproc == pageproc) {
903		vm_pageout_pages_needed = 1;
904		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
905	} else {
906		if (!vm_pages_needed) {
907			vm_pages_needed++;
908			wakeup(&vm_pages_needed);
909		}
910		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
911	}
912	splx(s);
913}
914
915
916/*
917 *	vm_page_activate:
918 *
919 *	Put the specified page on the active list (if appropriate).
920 *
921 *	The page queues must be locked.
922 */
923void
924vm_page_activate(m)
925	register vm_page_t m;
926{
927	int s;
928
929	s = splvm();
930	if (m->queue == PQ_ACTIVE)
931		panic("vm_page_activate: already active");
932
933	if ((m->queue - m->pc) == PQ_CACHE)
934		cnt.v_reactivated++;
935
936	vm_page_unqueue(m);
937
938	if (m->wire_count == 0) {
939		m->queue = PQ_ACTIVE;
940		++(*vm_page_queues[PQ_ACTIVE].lcnt);
941		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
942		if (m->act_count < ACT_INIT)
943			m->act_count = ACT_INIT;
944		cnt.v_active_count++;
945	}
946	splx(s);
947}
948
949/*
950 * helper routine for vm_page_free and vm_page_free_zero
951 */
952static int
953vm_page_freechk_and_unqueue(m)
954	vm_page_t m;
955{
956	if (m->busy ||
957		(m->flags & PG_BUSY) ||
958		((m->queue - m->pc) == PQ_FREE) ||
959		(m->hold_count != 0)) {
960		printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
961			m->pindex, m->busy,
962			(m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
963		if ((m->queue - m->pc) == PQ_FREE)
964			panic("vm_page_free: freeing free page");
965		else
966			panic("vm_page_free: freeing busy page");
967	}
968
969	vm_page_remove(m);
970	vm_page_unqueue_nowakeup(m);
971	if ((m->flags & PG_FICTITIOUS) != 0) {
972		return 0;
973	}
974	if (m->wire_count != 0) {
975		if (m->wire_count > 1) {
976			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
977				m->wire_count, m->pindex);
978		}
979		m->wire_count = 0;
980		cnt.v_wire_count--;
981	}
982
983	return 1;
984}
985
986/*
987 * helper routine for vm_page_free and vm_page_free_zero
988 */
989static __inline void
990vm_page_free_wakeup()
991{
992
993/*
994 * If the pageout daemon needs pages, then tell it that there are
995 * some free.
996 */
997	if (vm_pageout_pages_needed) {
998		wakeup(&vm_pageout_pages_needed);
999		vm_pageout_pages_needed = 0;
1000	}
1001	/*
1002	 * Wake up processes that are waiting on memory if we hit a
1003	 * high water mark, and wake up the scheduler process if we have
1004	 * lots of memory; the scheduler will swap processes back in.
1005	 */
1006	if (vm_pages_needed &&
1007		((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
1008		wakeup(&cnt.v_free_count);
1009		vm_pages_needed = 0;
1010	}
1011}
1012
1013/*
1014 *	vm_page_free:
1015 *
1016 *	Returns the given page to the free list,
1017 *	disassociating it from any VM object.
1018 *
1019 *	Object and page must be locked prior to entry.
1020 */
1021void
1022vm_page_free(m)
1023	register vm_page_t m;
1024{
1025	int s;
1026	struct vpgqueues *pq;
1027
1028	s = splvm();
1029
1030	cnt.v_tfree++;
1031
1032	if (!vm_page_freechk_and_unqueue(m)) {
1033		splx(s);
1034		return;
1035	}
1036
1037	m->queue = PQ_FREE + m->pc;
1038	pq = &vm_page_queues[m->queue];
1039	++(*pq->lcnt);
1040	++(*pq->cnt);
1041	/*
1042	 * If the pageout process is grabbing the page, it is likely
1043	 * that the page is NOT in the cache.  It is more likely that
1044	 * the page will be partially in the cache if it is being
1045	 * explicitly freed.
1046	 */
1047	if (curproc == pageproc) {
1048		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
1049	} else {
1050		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
1051	}
1052	vm_page_free_wakeup();
1053	splx(s);
1054}
1055
1056void
1057vm_page_free_zero(m)
1058	register vm_page_t m;
1059{
1060	int s;
1061	struct vpgqueues *pq;
1062
1063	s = splvm();
1064
1065	cnt.v_tfree++;
1066
1067	if (!vm_page_freechk_and_unqueue(m)) {
1068		splx(s);
1069		return;
1070	}
1071
1072	m->queue = PQ_ZERO + m->pc;
1073	pq = &vm_page_queues[m->queue];
1074	++(*pq->lcnt);
1075	++(*pq->cnt);
1076
1077	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
1078	++vm_page_zero_count;
1079	vm_page_free_wakeup();
1080	splx(s);
1081}
1082
1083/*
1084 *	vm_page_wire:
1085 *
1086 *	Mark this page as wired down by yet
1087 *	another map, removing it from paging queues
1088 *	as necessary.
1089 *
1090 *	The page queues must be locked.
1091 */
1092void
1093vm_page_wire(m)
1094	register vm_page_t m;
1095{
1096	int s;
1097
1098	if (m->wire_count == 0) {
1099		s = splvm();
1100		vm_page_unqueue(m);
1101		splx(s);
1102		cnt.v_wire_count++;
1103	}
1104	++(*vm_page_queues[PQ_NONE].lcnt);
1105	m->wire_count++;
1106	m->flags |= PG_MAPPED;
1107}
1108
1109/*
1110 *	vm_page_unwire:
1111 *
1112 *	Release one wiring of this page, potentially
1113 *	enabling it to be paged again.
1114 *
1115 *	The page queues must be locked.
1116 */
1117void
1118vm_page_unwire(m)
1119	register vm_page_t m;
1120{
1121	int s;
1122
1123	s = splvm();
1124
1125	if (m->wire_count > 0)
1126		m->wire_count--;
1127
1128	if (m->wire_count == 0) {
1129		cnt.v_wire_count--;
1130		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1131		m->queue = PQ_ACTIVE;
1132		++(*vm_page_queues[PQ_ACTIVE].lcnt);
1133		cnt.v_active_count++;
1134	}
1135	splx(s);
1136}
1137
1138
1139/*
1140 *	vm_page_deactivate:
1141 *
1142 *	Returns the given page to the inactive list,
1143 *	indicating that no physical maps have access
1144 *	to this page.  [Used by the physical mapping system.]
1145 *
1146 *	The page queues must be locked.
1147 */
1148void
1149vm_page_deactivate(m)
1150	register vm_page_t m;
1151{
1152	int s;
1153
1154	/*
1155	 * Only move active pages -- ignore locked or already inactive ones.
1156	 *
1157	 * XXX: sometimes we get pages which aren't wired down or on any queue -
1158	 * we need to put them on the inactive queue also, otherwise we lose
1159	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
1160	 */
1161	if (m->queue == PQ_INACTIVE)
1162		return;
1163
1164	s = splvm();
1165	if (m->wire_count == 0 && m->hold_count == 0) {
1166		if ((m->queue - m->pc) == PQ_CACHE)
1167			cnt.v_reactivated++;
1168		vm_page_unqueue(m);
1169		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
1170		m->queue = PQ_INACTIVE;
1171		++(*vm_page_queues[PQ_INACTIVE].lcnt);
1172		cnt.v_inactive_count++;
1173	}
1174	splx(s);
1175}
1176
1177/*
1178 * vm_page_cache
1179 *
1180 * Put the specified page onto the page cache queue (if appropriate).
1181 */
1182void
1183vm_page_cache(m)
1184	register vm_page_t m;
1185{
1186	int s;
1187
1188	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
1189		printf("vm_page_cache: attempting to cache busy page\n");
1190		return;
1191	}
1192	if ((m->queue - m->pc) == PQ_CACHE)
1193		return;
1194
1195	vm_page_protect(m, VM_PROT_NONE);
1196	if (m->dirty != 0) {
1197		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
1198	}
1199	s = splvm();
1200	vm_page_unqueue_nowakeup(m);
1201	m->queue = PQ_CACHE + m->pc;
1202	++(*vm_page_queues[m->queue].lcnt);
1203	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
1204	cnt.v_cache_count++;
1205	vm_page_free_wakeup();
1206	splx(s);
1207}
1208
1209
1210/*
1211 * mapping function for valid bits or for dirty bits in
1212 * a page
1213 */
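/*
 * For example, assuming DEV_BSIZE is 512 and a 4096-byte page,
 * vm_page_bits(1024, 2048) computes base chunk 2 and a four-chunk size,
 * so table entry 0xf is shifted left by 2, yielding 0x3c.
 */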
1214inline int
1215vm_page_bits(int base, int size)
1216{
1217	u_short chunk;
1218
1219	if ((base == 0) && (size >= PAGE_SIZE))
1220		return VM_PAGE_BITS_ALL;
1221	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1222	base = (base % PAGE_SIZE) / DEV_BSIZE;
1223	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
1224	return (chunk << base) & VM_PAGE_BITS_ALL;
1225}
1226
1227/*
1228 * set a page valid and clean
1229 */
1230void
1231vm_page_set_validclean(m, base, size)
1232	vm_page_t m;
1233	int base;
1234	int size;
1235{
1236	int pagebits = vm_page_bits(base, size);
1237	m->valid |= pagebits;
1238	m->dirty &= ~pagebits;
1239	if (base == 0 && size == PAGE_SIZE)
1240		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1241}
1242
1243/*
1244 * set a page (partially) invalid
1245 */
1246void
1247vm_page_set_invalid(m, base, size)
1248	vm_page_t m;
1249	int base;
1250	int size;
1251{
1252	int bits;
1253
1254	m->valid &= ~(bits = vm_page_bits(base, size));
1255	if (m->valid == 0)
1256		m->dirty &= ~bits;
1257}
1258
1259/*
1260 * is (partial) page valid?
1261 */
1262int
1263vm_page_is_valid(m, base, size)
1264	vm_page_t m;
1265	int base;
1266	int size;
1267{
1268	int bits = vm_page_bits(base, size);
1269
1270	if (m->valid && ((m->valid & bits) == bits))
1271		return 1;
1272	else
1273		return 0;
1274}
1275
1276void
1277vm_page_test_dirty(m)
1278	vm_page_t m;
1279{
1280	if ((m->dirty != VM_PAGE_BITS_ALL) &&
1281	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1282		m->dirty = VM_PAGE_BITS_ALL;
1283	}
1284}
1285
1286/*
1287 * This interface is for merging with malloc() someday.
1288 * Even if we never implement compaction so that contiguous allocation
1289 * works after initialization time, malloc()'s data structures are good
1290 * for statistics and for allocations of less than a page.
1291 */
1292void *
1293contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
1294	unsigned long size;	/* should be size_t here and for malloc() */
1295	struct malloc_type *type;
1296	int flags;
1297	unsigned long low;
1298	unsigned long high;
1299	unsigned long alignment;
1300	unsigned long boundary;
1301	vm_map_t map;
1302{
1303	int i, s, start;
1304	vm_offset_t addr, phys, tmp_addr;
1305	int pass;
1306	vm_page_t pga = vm_page_array;
1307
1308	size = round_page(size);
1309	if (size == 0)
1310		panic("contigmalloc1: size must not be 0");
1311	if ((alignment & (alignment - 1)) != 0)
1312		panic("contigmalloc1: alignment must be a power of 2");
1313	if ((boundary & (boundary - 1)) != 0)
1314		panic("contigmalloc1: boundary must be a power of 2");
1315
1316	start = 0;
1317	for (pass = 0; pass <= 1; pass++) {
1318		s = splvm();
1319again:
1320		/*
1321		 * Find first page in array that is free, within range, aligned, and
1322		 * such that the boundary won't be crossed.
1323		 */
1324		for (i = start; i < cnt.v_page_count; i++) {
1325			int pqtype;
1326			phys = VM_PAGE_TO_PHYS(&pga[i]);
1327			pqtype = pga[i].queue - pga[i].pc;
1328			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
1329			    (phys >= low) && (phys < high) &&
1330			    ((phys & (alignment - 1)) == 0) &&
1331			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
1332				break;
1333		}
1334
1335		/*
1336		 * If the above failed or we will exceed the upper bound, fail.
1337		 */
1338		if ((i == cnt.v_page_count) ||
1339			((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
1340			vm_page_t m, next;
1341
1342again1:
1343			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
1344				m != NULL;
1345				m = next) {
1346
1347				if (m->queue != PQ_INACTIVE) {
1348					break;
1349				}
1350
1351				next = TAILQ_NEXT(m, pageq);
1352				if (m->flags & PG_BUSY) {
1353					m->flags |= PG_WANTED;
1354					tsleep(m, PVM, "vpctw0", 0);
1355					goto again1;
1356				}
1357				vm_page_test_dirty(m);
1358				if (m->dirty) {
1359					if (m->object->type == OBJT_VNODE) {
1360						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
1361						goto again1;
1362					} else if (m->object->type == OBJT_SWAP ||
1363								m->object->type == OBJT_DEFAULT) {
1364						vm_page_protect(m, VM_PROT_NONE);
1365						vm_pageout_flush(&m, 1, 0);
1366						goto again1;
1367					}
1368				}
1369				if ((m->dirty == 0) &&
1370					(m->busy == 0) &&
1371					(m->hold_count == 0))
1372					vm_page_cache(m);
1373			}
1374
1375			for (m = TAILQ_FIRST(&vm_page_queue_active);
1376				m != NULL;
1377				m = next) {
1378
1379				if (m->queue != PQ_ACTIVE) {
1380					break;
1381				}
1382
1383				next = TAILQ_NEXT(m, pageq);
1384				if (m->flags & PG_BUSY) {
1385					m->flags |= PG_WANTED;
1386					tsleep(m, PVM, "vpctw1", 0);
1387					goto again1;
1388				}
1389				vm_page_test_dirty(m);
1390				if (m->dirty) {
1391					if (m->object->type == OBJT_VNODE) {
1392						vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
1393						goto again1;
1394					} else if (m->object->type == OBJT_SWAP ||
1395								m->object->type == OBJT_DEFAULT) {
1396						vm_page_protect(m, VM_PROT_NONE);
1397						vm_pageout_flush(&m, 1, 0);
1398						goto again1;
1399					}
1400				}
1401				if ((m->dirty == 0) &&
1402					(m->busy == 0) &&
1403					(m->hold_count == 0))
1404					vm_page_cache(m);
1405			}
1406
1407			splx(s);
1408			continue;
1409		}
1410		start = i;
1411
1412		/*
1413		 * Check successive pages for contiguous and free.
1414		 */
1415		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
1416			int pqtype;
1417			pqtype = pga[i].queue - pga[i].pc;
1418			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
1419			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
1420			    ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
1421				start++;
1422				goto again;
1423			}
1424		}
1425
1426		for (i = start; i < (start + size / PAGE_SIZE); i++) {
1427			int pqtype;
1428			vm_page_t m = &pga[i];
1429
1430			pqtype = m->queue - m->pc;
1431			if (pqtype == PQ_CACHE)
1432				vm_page_free(m);
1433
1434			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
1435			--(*vm_page_queues[m->queue].lcnt);
1436			cnt.v_free_count--;
1437			m->valid = VM_PAGE_BITS_ALL;
1438			m->flags = 0;
1439			m->dirty = 0;
1440			m->wire_count = 0;
1441			m->busy = 0;
1442			m->queue = PQ_NONE;
1443			m->object = NULL;
1444			vm_page_wire(m);
1445		}
1446
1447		/*
1448		 * We've found a contiguous chunk that meets our requirements.
1449		 * Allocate kernel VM, map the physical pages into it, and
1450		 * return the kernel VM address.
1451		 */
1452		tmp_addr = addr = kmem_alloc_pageable(map, size);
1453		if (addr == 0) {
1454			/*
1455			 * XXX We almost never run out of kernel virtual
1456			 * space, so we don't make the allocated memory
1457			 * above available.
1458			 */
1459			splx(s);
1460			return (NULL);
1461		}
1462
1463		for (i = start; i < (start + size / PAGE_SIZE); i++) {
1464			vm_page_t m = &pga[i];
1465			vm_page_insert(m, kernel_object,
1466				OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
1467			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
1468			tmp_addr += PAGE_SIZE;
1469		}
1470
1471		splx(s);
1472		return ((void *)addr);
1473	}
1474	return NULL;
1475}
1476
1477void *
1478contigmalloc(size, type, flags, low, high, alignment, boundary)
1479	unsigned long size;	/* should be size_t here and for malloc() */
1480	struct malloc_type *type;
1481	int flags;
1482	unsigned long low;
1483	unsigned long high;
1484	unsigned long alignment;
1485	unsigned long boundary;
1486{
1487	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
1488			     kernel_map);
1489}
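/*
 * Illustrative use only (a hypothetical driver buffer, not taken from
 * this file):
 *
 *	void *buf = contigmalloc(4 * PAGE_SIZE, M_DEVBUF, M_NOWAIT,
 *	    0ul, 0xfffffffful, PAGE_SIZE, 0ul);
 *
 * asks for four physically contiguous, page-aligned pages anywhere in the
 * 32-bit physical address range; NULL is returned on failure.
 */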
1490
1491vm_offset_t
1492vm_page_alloc_contig(size, low, high, alignment)
1493	vm_offset_t size;
1494	vm_offset_t low;
1495	vm_offset_t high;
1496	vm_offset_t alignment;
1497{
1498	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
1499					  alignment, 0ul, kernel_map));
1500}
1501
1502#include "opt_ddb.h"
1503#ifdef DDB
1504#include <sys/kernel.h>
1505
1506#include <ddb/ddb.h>
1507
1508DB_SHOW_COMMAND(page, vm_page_print_page_info)
1509{
1510	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1511	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1512	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1513	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1514	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1515	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1516	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1517	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1518	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1519	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1520}
1521
1522DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1523{
1524	int i;
1525	db_printf("PQ_FREE:");
1526	for (i = 0; i < PQ_L2_SIZE; i++) {
1527		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
1528	}
1529	db_printf("\n");
1530
1531	db_printf("PQ_CACHE:");
1532	for (i = 0; i < PQ_L2_SIZE; i++) {
1533		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
1534	}
1535	db_printf("\n");
1536
1537	db_printf("PQ_ZERO:");
1538	for (i = 0; i < PQ_L2_SIZE; i++) {
1539		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
1540	}
1541	db_printf("\n");
1542
1543	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1544		*vm_page_queues[PQ_ACTIVE].lcnt,
1545		*vm_page_queues[PQ_INACTIVE].lcnt);
1546}
1547#endif /* DDB */
1548