vm_page.c revision 32454
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37 *	$Id: vm_page.c,v 1.84 1997/12/29 00:24:58 dyson Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 *	Resident memory management module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/malloc.h>
74#include <sys/proc.h>
75#include <sys/vmmeter.h>
76#include <sys/vnode.h>
77
78#include <vm/vm.h>
79#include <vm/vm_param.h>
80#include <vm/vm_prot.h>
81#include <sys/lock.h>
82#include <vm/vm_kern.h>
83#include <vm/vm_object.h>
84#include <vm/vm_page.h>
85#include <vm/vm_pageout.h>
86#include <vm/vm_extern.h>
87
88static void	vm_page_queue_init __P((void));
89static vm_page_t vm_page_select_free __P((vm_object_t object,
90			vm_pindex_t pindex, int prefqueue));
91
92/*
93 *	Associated with each page of user-allocatable memory is a
94 *	page structure.
95 */
96
97static struct pglist *vm_page_buckets;	/* Array of buckets */
98static int vm_page_bucket_count;	/* How big is array? */
99static int vm_page_hash_mask;		/* Mask for hash function */
100
101struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
102struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
103struct pglist vm_page_queue_active = {0};
104struct pglist vm_page_queue_inactive = {0};
105struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {0};
106
107int no_queue=0;
108
109struct vpgqueues vm_page_queues[PQ_COUNT] = {0};
110int pqcnt[PQ_COUNT] = {0};
111
112static void
113vm_page_queue_init(void) {
114	int i;
115
116	vm_page_queues[PQ_NONE].pl = NULL;
117	vm_page_queues[PQ_NONE].cnt = &no_queue;
118	for(i=0;i<PQ_L2_SIZE;i++) {
119		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
120		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
121	}
122	for(i=0;i<PQ_L2_SIZE;i++) {
123		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
124		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
125	}
126	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
127	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
128
129	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
130	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
131	for(i=0;i<PQ_L2_SIZE;i++) {
132		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
133		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
134	}
135	for(i=0;i<PQ_COUNT;i++) {
136		if (vm_page_queues[i].pl) {
137			TAILQ_INIT(vm_page_queues[i].pl);
138		} else if (i != 0) {
139			panic("vm_page_queue_init: queue %d is null", i);
140		}
141		vm_page_queues[i].lcnt = &pqcnt[i];
142	}
143}
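
/*
 * Example of the coloring scheme set up above: with 4K pages and a
 * PQ_L2_SIZE of 16 (so PQ_L2_MASK is 0xf -- the actual value is
 * configuration dependent), the page at physical address 0x12f000 has
 * page number 0x12f and color 0xf, so while free it lives on
 * vm_page_queue_free[15], i.e. queue index PQ_FREE + 15.
 */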
144
145vm_page_t vm_page_array = 0;
146int vm_page_array_size = 0;
147long first_page = 0;
148static long last_page;
149static vm_size_t page_mask;
150static int page_shift;
151int vm_page_zero_count = 0;
152
153/*
154 * map of contiguous valid DEV_BSIZE chunks in a page
155 * (this list is valid for page sizes upto 16*DEV_BSIZE)
156 */
157static u_short vm_page_dev_bsize_chunks[] = {
158	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
159	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
160};
161
162static inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
163static int vm_page_freechk_and_unqueue __P((vm_page_t m));
164static void vm_page_free_wakeup __P((void));
165
166/*
167 *	vm_set_page_size:
168 *
169 *	Sets the page size, perhaps based upon the memory
170 *	size.  Must be called before any use of page-size
171 *	dependent functions.
172 *
173 *	Sets page_shift and page_mask from cnt.v_page_size.
174 */
175void
176vm_set_page_size()
177{
178
179	if (cnt.v_page_size == 0)
180		cnt.v_page_size = DEFAULT_PAGE_SIZE;
181	page_mask = cnt.v_page_size - 1;
182	if ((page_mask & cnt.v_page_size) != 0)
183		panic("vm_set_page_size: page size not a power of two");
184	for (page_shift = 0;; page_shift++)
185		if ((1 << page_shift) == cnt.v_page_size)
186			break;
187}
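
/*
 * For example, with the usual 4096-byte page size this leaves
 * cnt.v_page_size == 4096, page_mask == 0xfff and page_shift == 12.
 */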
188
189/*
190 *	vm_page_startup:
191 *
192 *	Initializes the resident memory module.
193 *
194 *	Allocates memory for the page cells, and
195 *	for the object/offset-to-page hash table headers.
196 *	Each page cell is initialized and placed on the free list.
197 */
198
199vm_offset_t
200vm_page_startup(starta, enda, vaddr)
201	register vm_offset_t starta;
202	vm_offset_t enda;
203	register vm_offset_t vaddr;
204{
205	register vm_offset_t mapped;
206	register vm_page_t m;
207	register struct pglist *bucket;
208	vm_size_t npages, page_range;
209	register vm_offset_t new_start;
210	int i;
211	vm_offset_t pa;
212	int nblocks;
213	vm_offset_t first_managed_page;
214
215	/* the biggest memory array is the second group of pages */
216	vm_offset_t start;
217	vm_offset_t biggestone, biggestsize;
218
219	vm_offset_t total;
220
221	total = 0;
222	biggestsize = 0;
223	biggestone = 0;
224	nblocks = 0;
225	vaddr = round_page(vaddr);
226
227	for (i = 0; phys_avail[i + 1]; i += 2) {
228		phys_avail[i] = round_page(phys_avail[i]);
229		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
230	}
231
232	for (i = 0; phys_avail[i + 1]; i += 2) {
233		int size = phys_avail[i + 1] - phys_avail[i];
234
235		if (size > biggestsize) {
236			biggestone = i;
237			biggestsize = size;
238		}
239		++nblocks;
240		total += size;
241	}
242
243	start = phys_avail[biggestone];
244
245	/*
246	 * Initialize the queue headers for the free queue, the active queue
247	 * and the inactive queue.
248	 */
249
250	vm_page_queue_init();
251
252	/*
253	 * Allocate (and initialize) the hash table buckets.
254	 *
255	 * The number of buckets MUST BE a power of 2, and the actual value is
256	 * the next power of 2 greater than the number of physical pages in
257	 * the system.
258	 *
259	 * Note: This computation can be tweaked if desired.
260	 */
261	vm_page_buckets = (struct pglist *) vaddr;
262	bucket = vm_page_buckets;
263	if (vm_page_bucket_count == 0) {
264		vm_page_bucket_count = 1;
265		while (vm_page_bucket_count < atop(total))
266			vm_page_bucket_count <<= 1;
267	}
268	vm_page_hash_mask = vm_page_bucket_count - 1;
269
270	/*
271	 * Validate these addresses.
272	 */
273
274	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
275	new_start = round_page(new_start);
276	mapped = vaddr;
277	vaddr = pmap_map(mapped, start, new_start,
278	    VM_PROT_READ | VM_PROT_WRITE);
279	start = new_start;
280	bzero((caddr_t) mapped, vaddr - mapped);
281	mapped = vaddr;
282
283	for (i = 0; i < vm_page_bucket_count; i++) {
284		TAILQ_INIT(bucket);
285		bucket++;
286	}
287
288	/*
289	 * Validate these zone addresses.
290	 */
291
292	new_start = start + (vaddr - mapped);
293	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
294	bzero((caddr_t) mapped, (vaddr - mapped));
295	start = round_page(new_start);
296
297	/*
298	 * Compute the number of pages of memory that will be available for
299	 * use (taking into account the overhead of a page structure per
300	 * page).
301	 */
302
303	first_page = phys_avail[0] / PAGE_SIZE;
304	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
305
306	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
307	npages = (total - (page_range * sizeof(struct vm_page)) -
308	    (start - phys_avail[biggestone])) / PAGE_SIZE;
309
310	/*
311	 * Initialize the mem entry structures now, and put them in the free
312	 * queue.
313	 */
314
315	vm_page_array = (vm_page_t) vaddr;
316	mapped = vaddr;
317
318	/*
319	 * Validate these addresses.
320	 */
321
322	new_start = round_page(start + page_range * sizeof(struct vm_page));
323	mapped = pmap_map(mapped, start, new_start,
324	    VM_PROT_READ | VM_PROT_WRITE);
325	start = new_start;
326
327	first_managed_page = start / PAGE_SIZE;
328
329	/*
330	 * Clear all of the page structures
331	 */
332	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
333	vm_page_array_size = page_range;
334
335	cnt.v_page_count = 0;
336	cnt.v_free_count = 0;
337	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
338		if (i == biggestone)
339			pa = ptoa(first_managed_page);
340		else
341			pa = phys_avail[i];
342		while (pa < phys_avail[i + 1] && npages-- > 0) {
343			++cnt.v_page_count;
344			++cnt.v_free_count;
345			m = PHYS_TO_VM_PAGE(pa);
346			m->phys_addr = pa;
347			m->flags = 0;
348			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
349			m->queue = PQ_FREE + m->pc;
350			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
351			++(*vm_page_queues[m->queue].lcnt);
352			pa += PAGE_SIZE;
353		}
354	}
355
356	return (mapped);
357}
358
359/*
360 *	vm_page_hash:
361 *
362 *	Distributes the object/offset key pair among hash buckets.
363 *
364 *	NOTE:  This routine depends on vm_page_bucket_count being a power of 2.
365 */
366static inline int
367vm_page_hash(object, pindex)
368	vm_object_t object;
369	vm_pindex_t pindex;
370{
371	return ((((unsigned) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
372}
373
374/*
375 *	vm_page_insert:		[ internal use only ]
376 *
377 *	Inserts the given mem entry into the object/offset-page
378 *	table and object list.
379 *
380 *	The object and page must be locked, and the caller must be at splhigh.
381 */
382
383void
384vm_page_insert(m, object, pindex)
385	register vm_page_t m;
386	register vm_object_t object;
387	register vm_pindex_t pindex;
388{
389	register struct pglist *bucket;
390
391	if (m->flags & PG_TABLED)
392		panic("vm_page_insert: already inserted");
393
394	/*
395	 * Record the object/offset pair in this page
396	 */
397
398	m->object = object;
399	m->pindex = pindex;
400
401	/*
402 *	Insert it into the object/offset hash table
403	 */
404
405	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
406	TAILQ_INSERT_TAIL(bucket, m, hashq);
407
408	/*
409	 * Now link into the object's list of backed pages.
410	 */
411
412	TAILQ_INSERT_TAIL(&object->memq, m, listq);
413	m->flags |= PG_TABLED;
414	m->object->page_hint = m;
415
416	/*
417	 * And show that the object has one more resident page.
418	 */
419
420	object->resident_page_count++;
421}
422
423/*
424 *	vm_page_remove:		[ internal use only ]
425 *				NOTE: used by device pager as well -wfj
426 *
427 *	Removes the given mem entry from the object/offset-page
428 *	table and the object page list.
429 *
430 *	The object and page must be locked, and at splhigh.
431 */
432
433void
434vm_page_remove(m)
435	register vm_page_t m;
436{
437	register struct pglist *bucket;
438
439	if (!(m->flags & PG_TABLED))
440		return;
441
442	if (m->object->page_hint == m)
443		m->object->page_hint = NULL;
444
445	/*
446 *	Remove from the object/offset hash table
447	 */
448
449	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
450	TAILQ_REMOVE(bucket, m, hashq);
451
452	/*
453	 * Now remove from the object's list of backed pages.
454	 */
455
456	TAILQ_REMOVE(&m->object->memq, m, listq);
457
458	/*
459	 * And show that the object has one fewer resident page.
460	 */
461
462	m->object->resident_page_count--;
463
464	m->flags &= ~PG_TABLED;
465}
466
467/*
468 *	vm_page_lookup:
469 *
470 *	Returns the page associated with the object/offset
471 *	pair specified; if none is found, NULL is returned.
472 *
473 *	The object must be locked.  No side effects.
474 */
475
476vm_page_t
477vm_page_lookup(object, pindex)
478	register vm_object_t object;
479	register vm_pindex_t pindex;
480{
481	register vm_page_t m;
482	register struct pglist *bucket;
483	int s;
484
485	/*
486	 * Search the hash table for this object/offset pair
487	 */
488
489	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
490
491	s = splvm();
492	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) {
493		if ((m->object == object) && (m->pindex == pindex)) {
494			splx(s);
495			m->object->page_hint = m;
496			return (m);
497		}
498	}
499	splx(s);
500	return (NULL);
501}
502
503/*
504 *	vm_page_rename:
505 *
506 *	Move the given memory entry from its
507 *	current object to the specified target object/offset.
508 *
509 *	The object must be locked.
510 */
511void
512vm_page_rename(m, new_object, new_pindex)
513	register vm_page_t m;
514	register vm_object_t new_object;
515	vm_pindex_t new_pindex;
516{
517	int s;
518
519	s = splvm();
520	vm_page_remove(m);
521	vm_page_insert(m, new_object, new_pindex);
522	splx(s);
523}
524
525/*
526 * vm_page_unqueue without any wakeup
527 */
528void
529vm_page_unqueue_nowakeup(m)
530	vm_page_t m;
531{
532	int queue = m->queue;
533	struct vpgqueues *pq;
534	if (queue != PQ_NONE) {
535		pq = &vm_page_queues[queue];
536		m->queue = PQ_NONE;
537		TAILQ_REMOVE(pq->pl, m, pageq);
538		--(*pq->cnt);
539		--(*pq->lcnt);
540	}
541}
542
543/*
544 * vm_page_unqueue must be called at splhigh().
545 */
546void
547vm_page_unqueue(m)
548	vm_page_t m;
549{
550	int queue = m->queue;
551	struct vpgqueues *pq;
552	if (queue != PQ_NONE) {
553		m->queue = PQ_NONE;
554		pq = &vm_page_queues[queue];
555		TAILQ_REMOVE(pq->pl, m, pageq);
556		--(*pq->cnt);
557		--(*pq->lcnt);
558		if ((queue - m->pc) == PQ_CACHE) {
559			if ((cnt.v_cache_count + cnt.v_free_count) <
560				(cnt.v_free_reserved + cnt.v_cache_min))
561				pagedaemon_wakeup();
562		}
563	}
564}
565
566/*
567 * Find a page on the specified queue with color optimization.
568 */
569vm_page_t
570vm_page_list_find(basequeue, index)
571	int basequeue, index;
572{
573#if PQ_L2_SIZE > 1
574
575	int i,j;
576	vm_page_t m;
577	int hindex;
578	struct vpgqueues *pq;
579
580	pq = &vm_page_queues[basequeue];
581
582	m = TAILQ_FIRST(pq[index].pl);
583	if (m)
584		return m;
585
586	for(j = 0; j < PQ_L1_SIZE; j++) {
587		int ij;
588		for(i = (PQ_L2_SIZE / 2) - PQ_L1_SIZE;
589			(ij = i + j) > 0;
590			i -= PQ_L1_SIZE) {
591
592			hindex = index + ij;
593			if (hindex >= PQ_L2_SIZE)
594				hindex -= PQ_L2_SIZE;
595			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
596				return m;
597
598			hindex = index - ij;
599			if (hindex < 0)
600				hindex += PQ_L2_SIZE;
601			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
602				return m;
603		}
604	}
605
606	hindex = index + PQ_L2_SIZE / 2;
607	if (hindex >= PQ_L2_SIZE)
608		hindex -= PQ_L2_SIZE;
609	m = TAILQ_FIRST(pq[hindex].pl);
610	if (m)
611		return m;
612
613	return NULL;
614#else
615	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
616#endif
617
618}
619
620/*
621 * Find a page on the specified queue with color optimization.
622 */
623vm_page_t
624vm_page_select(object, pindex, basequeue)
625	vm_object_t object;
626	vm_pindex_t pindex;
627	int basequeue;
628{
629
630#if PQ_L2_SIZE > 1
631	int index;
632	index = (pindex + object->pg_color) & PQ_L2_MASK;
633	return vm_page_list_find(basequeue, index);
634
635#else
636	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
637#endif
638
639}
640
641/*
642 * Find a free or zero page, with specified preference.
643 */
644static vm_page_t
645vm_page_select_free(object, pindex, prefqueue)
646	vm_object_t object;
647	vm_pindex_t pindex;
648	int prefqueue;
649{
650#if PQ_L2_SIZE > 1
651	int i,j;
652	int index, hindex;
653#endif
654	vm_page_t m, mh;
655	int oqueuediff;
656	struct vpgqueues *pq;
657
658	if (prefqueue == PQ_ZERO)
659		oqueuediff = PQ_FREE - PQ_ZERO;
660	else
661		oqueuediff = PQ_ZERO - PQ_FREE;
662
663	if ((mh = object->page_hint) != NULL) {
664		 if (mh->pindex == (pindex - 1)) {
665			if ((mh->flags & PG_FICTITIOUS) == 0) {
666				if ((mh < &vm_page_array[cnt.v_page_count-1]) &&
667					(mh >= &vm_page_array[0])) {
668					int queue;
669					m = mh + 1;
670					if (VM_PAGE_TO_PHYS(m) == (VM_PAGE_TO_PHYS(mh) + PAGE_SIZE)) {
671						queue = m->queue - m->pc;
672						if (queue == PQ_FREE || queue == PQ_ZERO) {
673							return m;
674						}
675					}
676				}
677			}
678		}
679	}
680
681	pq = &vm_page_queues[prefqueue];
682
683#if PQ_L2_SIZE > 1
684
685	index = (pindex + object->pg_color) & PQ_L2_MASK;
686
687	if ((m = TAILQ_FIRST(pq[index].pl)) != NULL)
688		return m;
689	if ((m = TAILQ_FIRST(pq[index + oqueuediff].pl)) != NULL)
690		return m;
691
692	for(j = 0; j < PQ_L1_SIZE; j++) {
693		int ij;
694		for(i = (PQ_L2_SIZE / 2) - PQ_L1_SIZE;
695			(ij = i + j) >= 0;
696			i -= PQ_L1_SIZE) {
697
698			hindex = index + ij;
699			if (hindex >= PQ_L2_SIZE)
700				hindex -= PQ_L2_SIZE;
701			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
702				return m;
703			if ((m = TAILQ_FIRST(pq[hindex + oqueuediff].pl)) != NULL)
704				return m;
705
706			hindex = index - ij;
707			if (hindex < 0)
708				hindex += PQ_L2_SIZE;
709			if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
710				return m;
711			if ((m = TAILQ_FIRST(pq[hindex + oqueuediff].pl)) != NULL)
712				return m;
713		}
714	}
715
716	hindex = index + PQ_L2_SIZE / 2;
717	if (hindex >= PQ_L2_SIZE)
718		hindex -= PQ_L2_SIZE;
719	if ((m = TAILQ_FIRST(pq[hindex].pl)) != NULL)
720		return m;
721	if ((m = TAILQ_FIRST(pq[hindex+oqueuediff].pl)) != NULL)
722		return m;
723
724#else
725	if ((m = TAILQ_FIRST(pq[0].pl)) != NULL)
726		return m;
727	else
728		return TAILQ_FIRST(pq[oqueuediff].pl);
729#endif
730
731	return NULL;
732}
733
734/*
735 *	vm_page_alloc:
736 *
737 *	Allocate and return a memory cell associated
738 *	with this VM object/offset pair.
739 *
740 *	page_req classes:
741 *	VM_ALLOC_NORMAL		normal process request
742 *	VM_ALLOC_SYSTEM		system *really* needs a page
743 *	VM_ALLOC_INTERRUPT	interrupt time request
744 *	VM_ALLOC_ZERO		zero page
745 *
746 *	Object must be locked.
747 */
748vm_page_t
749vm_page_alloc(object, pindex, page_req)
750	vm_object_t object;
751	vm_pindex_t pindex;
752	int page_req;
753{
754	register vm_page_t m;
755	struct vpgqueues *pq;
756	vm_object_t oldobject;
757	int queue, qtype;
758	int s;
759
760#ifdef DIAGNOSTIC
761	m = vm_page_lookup(object, pindex);
762	if (m)
763		panic("vm_page_alloc: page already allocated");
764#endif
765
766	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
767		page_req = VM_ALLOC_SYSTEM;
768	}
769
770	s = splvm();
771
772	switch (page_req) {
773
774	case VM_ALLOC_NORMAL:
775		if (cnt.v_free_count >= cnt.v_free_reserved) {
776			m = vm_page_select_free(object, pindex, PQ_FREE);
777#if defined(DIAGNOSTIC)
778			if (m == NULL)
779				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
780#endif
781		} else {
782			m = vm_page_select(object, pindex, PQ_CACHE);
783			if (m == NULL) {
784				splx(s);
785#if defined(DIAGNOSTIC)
786				if (cnt.v_cache_count > 0)
787					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
788#endif
789				pagedaemon_wakeup();
790				return (NULL);
791			}
792		}
793		break;
794
795	case VM_ALLOC_ZERO:
796		if (cnt.v_free_count >= cnt.v_free_reserved) {
797			m = vm_page_select_free(object, pindex, PQ_ZERO);
798#if defined(DIAGNOSTIC)
799			if (m == NULL)
800				panic("vm_page_alloc(ZERO): missing page on free queue\n");
801#endif
802		} else {
803			m = vm_page_select(object, pindex, PQ_CACHE);
804			if (m == NULL) {
805				splx(s);
806#if defined(DIAGNOSTIC)
807				if (cnt.v_cache_count > 0)
808					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
809#endif
810				pagedaemon_wakeup();
811				return (NULL);
812			}
813		}
814		break;
815
816	case VM_ALLOC_SYSTEM:
817		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
818		    ((cnt.v_cache_count == 0) &&
819		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
820			m = vm_page_select_free(object, pindex, PQ_FREE);
821#if defined(DIAGNOSTIC)
822			if (m == NULL)
823				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
824#endif
825		} else {
826			m = vm_page_select(object, pindex, PQ_CACHE);
827			if (m == NULL) {
828				splx(s);
829#if defined(DIAGNOSTIC)
830				if (cnt.v_cache_count > 0)
831					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
832#endif
833				pagedaemon_wakeup();
834				return (NULL);
835			}
836		}
837		break;
838
839	case VM_ALLOC_INTERRUPT:
840		if (cnt.v_free_count > 0) {
841			m = vm_page_select_free(object, pindex, PQ_FREE);
842#if defined(DIAGNOSTIC)
843			if (m == NULL)
844				panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
845#endif
846		} else {
847			splx(s);
848			pagedaemon_wakeup();
849			return (NULL);
850		}
851		break;
852
853	default:
854		panic("vm_page_alloc: invalid allocation class");
855	}
856
857	queue = m->queue;
858	qtype = queue - m->pc;
859	if (qtype == PQ_ZERO)
860		--vm_page_zero_count;
861	pq = &vm_page_queues[queue];
862	TAILQ_REMOVE(pq->pl, m, pageq);
863	--(*pq->cnt);
864	--(*pq->lcnt);
865	oldobject = NULL;
866	if (qtype == PQ_ZERO) {
867		m->flags = PG_ZERO|PG_BUSY;
868	} else if (qtype == PQ_CACHE) {
869		oldobject = m->object;
870		vm_page_remove(m);
871		m->flags = PG_BUSY;
872	} else {
873		m->flags = PG_BUSY;
874	}
875	m->wire_count = 0;
876	m->hold_count = 0;
877	m->act_count = 0;
878	m->busy = 0;
879	m->valid = 0;
880	m->dirty = 0;
881	m->queue = PQ_NONE;
882
883	/* XXX before splx until vm_page_insert is safe */
884	vm_page_insert(m, object, pindex);
885
886	splx(s);
887
888	/*
889	 * Don't wake up too often - wake up the pageout daemon only when
890	 * we would be nearly out of memory.
891	 */
892	if (((cnt.v_free_count + cnt.v_cache_count) <
893		(cnt.v_free_reserved + cnt.v_cache_min)) ||
894			(cnt.v_free_count < cnt.v_pageout_free_min))
895		pagedaemon_wakeup();
896
897	if (((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
898		oldobject &&
899		((oldobject->type == OBJT_VNODE) &&
900		 (oldobject->ref_count == 0) &&
901		 (oldobject->resident_page_count == 0))) {
902		struct vnode *vp;
903		vp = (struct vnode *) oldobject->handle;
904		if (VSHOULDFREE(vp)) {
905			vm_object_reference(oldobject);
906			vm_object_vndeallocate(oldobject);
907		}
908	}
909
910	return (m);
911}
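
/*
 * Sketch of a typical caller (illustrative only): allocate a page for an
 * object/offset pair and sleep in vm_wait() until memory is available,
 * then retry:
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait();
 */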
912
913void
914vm_wait()
915{
916	int s;
917
918	s = splvm();
919	if (curproc == pageproc) {
920		vm_pageout_pages_needed = 1;
921		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
922	} else {
923		if (!vm_pages_needed) {
924			vm_pages_needed++;
925			wakeup(&vm_pages_needed);
926		}
927		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
928	}
929	splx(s);
930}
931
932
933/*
934 *	vm_page_activate:
935 *
936 *	Put the specified page on the active list (if appropriate).
937 *
938 *	The page queues must be locked.
939 */
940void
941vm_page_activate(m)
942	register vm_page_t m;
943{
944	int s;
945
946	s = splvm();
947	if (m->queue == PQ_ACTIVE)
948		panic("vm_page_activate: already active");
949
950	if ((m->queue - m->pc) == PQ_CACHE)
951		cnt.v_reactivated++;
952
953	vm_page_unqueue(m);
954
955	if (m->wire_count == 0) {
956		m->queue = PQ_ACTIVE;
957		++(*vm_page_queues[PQ_ACTIVE].lcnt);
958		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
959		if (m->act_count < ACT_INIT)
960			m->act_count = ACT_INIT;
961		cnt.v_active_count++;
962	}
963	splx(s);
964}
965
966/*
967 * helper routine for vm_page_free and vm_page_free_zero
968 */
969static int
970vm_page_freechk_and_unqueue(m)
971	vm_page_t m;
972{
973#if !defined(MAX_PERF)
974	if (m->busy ||
975		(m->flags & PG_BUSY) ||
976		((m->queue - m->pc) == PQ_FREE) ||
977		(m->hold_count != 0)) {
978		printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
979			m->pindex, m->busy,
980			(m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
981		if ((m->queue - m->pc) == PQ_FREE)
982			panic("vm_page_free: freeing free page");
983		else
984			panic("vm_page_free: freeing busy page");
985	}
986#endif
987
988	vm_page_remove(m);
989	vm_page_unqueue_nowakeup(m);
990	if ((m->flags & PG_FICTITIOUS) != 0) {
991		return 0;
992	}
993	if (m->wire_count != 0) {
994		if (m->wire_count > 1) {
995			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
996				m->wire_count, m->pindex);
997		}
998		m->wire_count = 0;
999		cnt.v_wire_count--;
1000	}
1001
1002	return 1;
1003}
1004
1005/*
1006 * helper routine for vm_page_free and vm_page_free_zero
1007 */
1008static __inline void
1009vm_page_free_wakeup()
1010{
1011
1012/*
1013 * If the pageout daemon needs pages, then tell it that there are
1014 * some free.
1015 */
1016	if (vm_pageout_pages_needed) {
1017		wakeup(&vm_pageout_pages_needed);
1018		vm_pageout_pages_needed = 0;
1019	}
1020	/*
1021	 * Wake up processes that are waiting on memory if we hit a
1022	 * high water mark, and wake up the scheduler process if we have
1023	 * lots of memory; that process will swap processes back in.
1024	 */
1025	if (vm_pages_needed &&
1026		((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
1027		wakeup(&cnt.v_free_count);
1028		vm_pages_needed = 0;
1029	}
1030}
1031
1032/*
1033 *	vm_page_free:
1034 *
1035 *	Returns the given page to the free list,
1036 *	disassociating it from any VM object.
1037 *
1038 *	Object and page must be locked prior to entry.
1039 */
1040void
1041vm_page_free(m)
1042	register vm_page_t m;
1043{
1044	int s;
1045	struct vpgqueues *pq;
1046
1047	s = splvm();
1048
1049	cnt.v_tfree++;
1050
1051	if (!vm_page_freechk_and_unqueue(m)) {
1052		splx(s);
1053		return;
1054	}
1055
1056	m->queue = PQ_FREE + m->pc;
1057	pq = &vm_page_queues[m->queue];
1058	++(*pq->lcnt);
1059	++(*pq->cnt);
1060	/*
1061	 * If the pageout process is grabbing the page, it is likely
1062	 * that the page is NOT in the hardware cache.  It is more likely
1063	 * that the page will be partially in the hardware cache if it is
1064	 * being explicitly freed, so such pages go at the head of the queue.
1065	 */
1066	if (curproc == pageproc) {
1067		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
1068	} else {
1069		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
1070	}
1071	vm_page_free_wakeup();
1072	splx(s);
1073}
1074
1075void
1076vm_page_free_zero(m)
1077	register vm_page_t m;
1078{
1079	int s;
1080	struct vpgqueues *pq;
1081
1082	s = splvm();
1083
1084	cnt.v_tfree++;
1085
1086	if (!vm_page_freechk_and_unqueue(m)) {
1087		splx(s);
1088		return;
1089	}
1090
1091	m->queue = PQ_ZERO + m->pc;
1092	pq = &vm_page_queues[m->queue];
1093	++(*pq->lcnt);
1094	++(*pq->cnt);
1095
1096	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
1097	++vm_page_zero_count;
1098	vm_page_free_wakeup();
1099	splx(s);
1100}
1101
1102/*
1103 *	vm_page_wire:
1104 *
1105 *	Mark this page as wired down by yet
1106 *	another map, removing it from paging queues
1107 *	as necessary.
1108 *
1109 *	The page queues must be locked.
1110 */
1111void
1112vm_page_wire(m)
1113	register vm_page_t m;
1114{
1115	int s;
1116
1117	if (m->wire_count == 0) {
1118		s = splvm();
1119		vm_page_unqueue(m);
1120		splx(s);
1121		cnt.v_wire_count++;
1122	}
1123	++(*vm_page_queues[PQ_NONE].lcnt);
1124	m->wire_count++;
1125	m->flags |= PG_MAPPED;
1126}
1127
1128/*
1129 *	vm_page_unwire:
1130 *
1131 *	Release one wiring of this page, potentially
1132 *	enabling it to be paged again.
1133 *
1134 *	The page queues must be locked.
1135 */
1136void
1137vm_page_unwire(m)
1138	register vm_page_t m;
1139{
1140	int s;
1141
1142	s = splvm();
1143
1144	if (m->wire_count > 0)
1145		m->wire_count--;
1146
1147	if (m->wire_count == 0) {
1148		cnt.v_wire_count--;
1149		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
1150		m->queue = PQ_ACTIVE;
1151		++(*vm_page_queues[PQ_ACTIVE].lcnt);
1152		cnt.v_active_count++;
1153	}
1154	splx(s);
1155}
1156
1157
1158/*
1159 *	vm_page_deactivate:
1160 *
1161 *	Returns the given page to the inactive list,
1162 *	indicating that no physical maps have access
1163 *	to this page.  [Used by the physical mapping system.]
1164 *
1165 *	The page queues must be locked.
1166 */
1167void
1168vm_page_deactivate(m)
1169	register vm_page_t m;
1170{
1171	int s;
1172
1173	/*
1174	 * Only move active pages -- ignore locked or already inactive ones.
1175	 *
1176	 * XXX: sometimes we get pages which aren't wired down or on any queue -
1177	 * we need to put them on the inactive queue also, otherwise we lose
1178	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
1179	 */
1180	if (m->queue == PQ_INACTIVE)
1181		return;
1182
1183	s = splvm();
1184	if (m->wire_count == 0 && m->hold_count == 0) {
1185		if ((m->queue - m->pc) == PQ_CACHE)
1186			cnt.v_reactivated++;
1187		vm_page_unqueue(m);
1188		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
1189		m->queue = PQ_INACTIVE;
1190		++(*vm_page_queues[PQ_INACTIVE].lcnt);
1191		cnt.v_inactive_count++;
1192	}
1193	splx(s);
1194}
1195
1196/*
1197 * vm_page_cache
1198 *
1199 * Put the specified page onto the page cache queue (if appropriate).
1200 */
1201void
1202vm_page_cache(m)
1203	register vm_page_t m;
1204{
1205	int s;
1206
1207	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
1208		printf("vm_page_cache: attempting to cache busy page\n");
1209		return;
1210	}
1211	if ((m->queue - m->pc) == PQ_CACHE)
1212		return;
1213
1214	vm_page_protect(m, VM_PROT_NONE);
1215	if (m->dirty != 0) {
1216		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
1217	}
1218	s = splvm();
1219	vm_page_unqueue_nowakeup(m);
1220	m->queue = PQ_CACHE + m->pc;
1221	++(*vm_page_queues[m->queue].lcnt);
1222	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
1223	cnt.v_cache_count++;
1224	vm_page_free_wakeup();
1225	splx(s);
1226}
1227
1228
1229/*
1230 * mapping function for valid bits or for dirty bits in
1231 * a page
1232 */
1233inline int
1234vm_page_bits(int base, int size)
1235{
1236	u_short chunk;
1237
1238	if ((base == 0) && (size >= PAGE_SIZE))
1239		return VM_PAGE_BITS_ALL;
1240	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1241	base = (base % PAGE_SIZE) / DEV_BSIZE;
1242	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
1243	return (chunk << base) & VM_PAGE_BITS_ALL;
1244}
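
/*
 * Worked example, assuming 4096-byte pages and a DEV_BSIZE of 512:
 * vm_page_bits(512, 1024) rounds the size to 1024, selects chunk 0x3
 * (two DEV_BSIZE blocks) from the table above, and shifts it to block 1,
 * yielding the mask 0x0006; vm_page_bits(0, PAGE_SIZE) short-circuits to
 * VM_PAGE_BITS_ALL.
 */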
1245
1246/*
1247 * set a page valid and clean
1248 */
1249void
1250vm_page_set_validclean(m, base, size)
1251	vm_page_t m;
1252	int base;
1253	int size;
1254{
1255	int pagebits = vm_page_bits(base, size);
1256	m->valid |= pagebits;
1257	m->dirty &= ~pagebits;
1258	if (base == 0 && size == PAGE_SIZE)
1259		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1260}
1261
1262/*
1263 * set a page (partially) invalid
1264 */
1265void
1266vm_page_set_invalid(m, base, size)
1267	vm_page_t m;
1268	int base;
1269	int size;
1270{
1271	int bits;
1272
1273	m->valid &= ~(bits = vm_page_bits(base, size));
1274	if (m->valid == 0)
1275		m->dirty &= ~bits;
1276}
1277
1278/*
1279 * is (partial) page valid?
1280 */
1281int
1282vm_page_is_valid(m, base, size)
1283	vm_page_t m;
1284	int base;
1285	int size;
1286{
1287	int bits = vm_page_bits(base, size);
1288
1289	if (m->valid && ((m->valid & bits) == bits))
1290		return 1;
1291	else
1292		return 0;
1293}
1294
1295void
1296vm_page_test_dirty(m)
1297	vm_page_t m;
1298{
1299	if ((m->dirty != VM_PAGE_BITS_ALL) &&
1300	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1301		m->dirty = VM_PAGE_BITS_ALL;
1302	}
1303}
1304
1305/*
1306 * This interface is for merging with malloc() someday.
1307 * Even if we never implement compaction so that contiguous allocation
1308 * works after initialization time, malloc()'s data structures are good
1309 * for statistics and for allocations of less than a page.
1310 */
1311void *
1312contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
1313	unsigned long size;	/* should be size_t here and for malloc() */
1314	struct malloc_type *type;
1315	int flags;
1316	unsigned long low;
1317	unsigned long high;
1318	unsigned long alignment;
1319	unsigned long boundary;
1320	vm_map_t map;
1321{
1322	int i, s, start;
1323	vm_offset_t addr, phys, tmp_addr;
1324	int pass;
1325	vm_page_t pga = vm_page_array;
1326
1327	size = round_page(size);
1328	if (size == 0)
1329		panic("contigmalloc1: size must not be 0");
1330	if ((alignment & (alignment - 1)) != 0)
1331		panic("contigmalloc1: alignment must be a power of 2");
1332	if ((boundary & (boundary - 1)) != 0)
1333		panic("contigmalloc1: boundary must be a power of 2");
1334
1335	start = 0;
1336	for (pass = 0; pass <= 1; pass++) {
1337		s = splvm();
1338again:
1339		/*
1340		 * Find first page in array that is free, within range, aligned, and
1341		 * such that the boundary won't be crossed.
1342		 */
1343		for (i = start; i < cnt.v_page_count; i++) {
1344			int pqtype;
1345			phys = VM_PAGE_TO_PHYS(&pga[i]);
1346			pqtype = pga[i].queue - pga[i].pc;
1347			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
1348			    (phys >= low) && (phys < high) &&
1349			    ((phys & (alignment - 1)) == 0) &&
1350			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
1351				break;
1352		}
1353
1354		/*
1355		 * If the above failed or we will exceed the upper bound, fail.
1356		 */
1357		if ((i == cnt.v_page_count) ||
1358			((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
1359			vm_page_t m, next;
1360
1361again1:
1362			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
1363				m != NULL;
1364				m = next) {
1365
1366				if (m->queue != PQ_INACTIVE) {
1367					break;
1368				}
1369
1370				next = TAILQ_NEXT(m, pageq);
1371				if (m->flags & PG_BUSY) {
1372					m->flags |= PG_WANTED;
1373					tsleep(m, PVM, "vpctw0", 0);
1374					goto again1;
1375				}
1376				vm_page_test_dirty(m);
1377				if (m->dirty) {
1378					if (m->object->type == OBJT_VNODE) {
1379						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
1380						vm_object_page_clean(m->object, 0, 0, TRUE);
1381						VOP_UNLOCK(m->object->handle, 0, curproc);
1382						goto again1;
1383					} else if (m->object->type == OBJT_SWAP ||
1384								m->object->type == OBJT_DEFAULT) {
1385						vm_page_protect(m, VM_PROT_NONE);
1386						vm_pageout_flush(&m, 1, 0);
1387						goto again1;
1388					}
1389				}
1390				if ((m->dirty == 0) &&
1391					(m->busy == 0) &&
1392					(m->hold_count == 0))
1393					vm_page_cache(m);
1394			}
1395
1396			for (m = TAILQ_FIRST(&vm_page_queue_active);
1397				m != NULL;
1398				m = next) {
1399
1400				if (m->queue != PQ_ACTIVE) {
1401					break;
1402				}
1403
1404				next = TAILQ_NEXT(m, pageq);
1405				if (m->flags & PG_BUSY) {
1406					m->flags |= PG_WANTED;
1407					tsleep(m, PVM, "vpctw1", 0);
1408					goto again1;
1409				}
1410				vm_page_test_dirty(m);
1411				if (m->dirty) {
1412					if (m->object->type == OBJT_VNODE) {
1413						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
1414						vm_object_page_clean(m->object, 0, 0, TRUE);
1415						VOP_UNLOCK(m->object->handle, 0, curproc);
1416						goto again1;
1417					} else if (m->object->type == OBJT_SWAP ||
1418								m->object->type == OBJT_DEFAULT) {
1419						vm_page_protect(m, VM_PROT_NONE);
1420						vm_pageout_flush(&m, 1, 0);
1421						goto again1;
1422					}
1423				}
1424				if ((m->dirty == 0) &&
1425					(m->busy == 0) &&
1426					(m->hold_count == 0))
1427					vm_page_cache(m);
1428			}
1429
1430			splx(s);
1431			continue;
1432		}
1433		start = i;
1434
1435		/*
1436		 * Check successive pages for contiguous and free.
1437		 * Check that the successive pages are contiguous and free.
1438		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
1439			int pqtype;
1440			pqtype = pga[i].queue - pga[i].pc;
1441			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
1442			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
1443			    ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
1444				start++;
1445				goto again;
1446			}
1447		}
1448
1449		for (i = start; i < (start + size / PAGE_SIZE); i++) {
1450			int pqtype;
1451			vm_page_t m = &pga[i];
1452
1453			pqtype = m->queue - m->pc;
1454			if (pqtype == PQ_CACHE)
1455				vm_page_free(m);
1456
1457			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
1458			--(*vm_page_queues[m->queue].lcnt);
1459			cnt.v_free_count--;
1460			m->valid = VM_PAGE_BITS_ALL;
1461			m->flags = 0;
1462			m->dirty = 0;
1463			m->wire_count = 0;
1464			m->busy = 0;
1465			m->queue = PQ_NONE;
1466			m->object = NULL;
1467			vm_page_wire(m);
1468		}
1469
1470		/*
1471		 * We've found a contiguous chunk that meets our requirements.
1472		 * Allocate kernel VM, unfree and assign the physical pages to it, and
1473		 * return the kernel VM pointer.
1474		 */
1475		tmp_addr = addr = kmem_alloc_pageable(map, size);
1476		if (addr == 0) {
1477			/*
1478			 * XXX We almost never run out of kernel virtual
1479			 * space, so we don't make the allocated memory
1480			 * above available.
1481			 */
1482			splx(s);
1483			return (NULL);
1484		}
1485
1486		for (i = start; i < (start + size / PAGE_SIZE); i++) {
1487			vm_page_t m = &pga[i];
1488			vm_page_insert(m, kernel_object,
1489				OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
1490			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
1491			tmp_addr += PAGE_SIZE;
1492		}
1493
1494		splx(s);
1495		return ((void *)addr);
1496	}
1497	return NULL;
1498}
1499
1500void *
1501contigmalloc(size, type, flags, low, high, alignment, boundary)
1502	unsigned long size;	/* should be size_t here and for malloc() */
1503	struct malloc_type *type;
1504	int flags;
1505	unsigned long low;
1506	unsigned long high;
1507	unsigned long alignment;
1508	unsigned long boundary;
1509{
1510	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
1511			     kernel_map);
1512}
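
/*
 * Usage sketch with illustrative values: a driver that needs a physically
 * contiguous 64K buffer below 16MB, 64K-aligned and not crossing a 64K
 * boundary, might call
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_NOWAIT,
 *	    0ul, 0xfffffful, 65536ul, 65536ul);
 *
 * and must check for a NULL return, since the search above can fail.
 */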
1513
1514vm_offset_t
1515vm_page_alloc_contig(size, low, high, alignment)
1516	vm_offset_t size;
1517	vm_offset_t low;
1518	vm_offset_t high;
1519	vm_offset_t alignment;
1520{
1521	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
1522					  alignment, 0ul, kernel_map));
1523}
1524
1525#include "opt_ddb.h"
1526#ifdef DDB
1527#include <sys/kernel.h>
1528
1529#include <ddb/ddb.h>
1530
1531DB_SHOW_COMMAND(page, vm_page_print_page_info)
1532{
1533	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1534	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1535	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1536	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1537	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1538	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1539	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1540	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1541	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1542	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1543}
1544
1545DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1546{
1547	int i;
1548	db_printf("PQ_FREE:");
1549	for(i=0;i<PQ_L2_SIZE;i++) {
1550		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
1551	}
1552	db_printf("\n");
1553
1554	db_printf("PQ_CACHE:");
1555	for(i=0;i<PQ_L2_SIZE;i++) {
1556		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
1557	}
1558	db_printf("\n");
1559
1560	db_printf("PQ_ZERO:");
1561	for(i=0;i<PQ_L2_SIZE;i++) {
1562		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
1563	}
1564	db_printf("\n");
1565
1566	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1567		*vm_page_queues[PQ_ACTIVE].lcnt,
1568		*vm_page_queues[PQ_INACTIVE].lcnt);
1569}
1570#endif /* DDB */
1571