/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.35 1995/09/03 19:57:25 dyson Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist *vm_page_buckets;	/* Array of buckets */
int vm_page_bucket_count;	/* How big is array? */
int vm_page_hash_mask;		/* Mask for hash function */

struct pglist vm_page_queue_free;
struct pglist vm_page_queue_zero;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t vm_page_array;
int vm_page_array_size;
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
vm_size_t page_mask;
int page_shift;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

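/*
 * Entry i of the table above has its low i bits set, so indexing by a
 * chunk count yields a mask covering that many DEV_BSIZE chunks; see
 * vm_page_bits() below.  E.g. vm_page_dev_bsize_chunks[2] == 0x3 masks
 * the first two chunks of a page.
 */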

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

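/*
 * Worked example: with cnt.v_page_size == 4096 the code above leaves
 * page_mask == 0xfff and page_shift == 12, so byte/page conversions
 * can be done with masks and shifts rather than division.
 */
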
/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free, zero, active,
	 * inactive and cache queues.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_zero);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);
	TAILQ_INIT(&vm_page_queue_cache);

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than or equal to the number of physical
	 * pages in the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;


	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = PG_FREE;
			m->phys_addr = pa;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

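/*
 * Sizing sketch (figures assumed for illustration): the bootstrap above
 * carves three regions out of the largest phys_avail block -- the hash
 * buckets, the static map/map-entry pool (kentry_data), and the vm_page
 * array.  On a machine with 16MB of managed memory and 4096-byte pages,
 * atop(total) == 4096, so 4096 buckets are allocated and the vm_page
 * array consumes page_range * sizeof(struct vm_page) bytes before the
 * remaining pages are placed on the free queue.
 */
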
/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
inline int
vm_page_hash(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
}

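/*
 * Example (illustrative): since vm_page_bucket_count is a power of 2,
 * vm_page_hash_mask == vm_page_bucket_count - 1 and the '&' above acts
 * as a cheap modulus.  With 1024 buckets the mask is 0x3ff, so an
 * object at 0xf0123400 with offset 0x2000 (NBPG == 4096) hashes to
 * (0xf0123400 + 2) & 0x3ff == 2.
 */
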
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked, and the caller must be at splhigh.
 */

inline void
vm_page_insert(mem, object, offset)
	register vm_page_t mem;
	register vm_object_t object;
	register vm_offset_t offset;
{
	register struct pglist *bucket;

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	TAILQ_INSERT_TAIL(bucket, mem, hashq);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and the caller must be at splhigh.
 */

inline void
vm_page_remove(mem)
	register vm_page_t mem;
{
	register struct pglist *bucket;

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	TAILQ_REMOVE(bucket, mem, hashq);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, offset)
	register vm_object_t object;
	register vm_offset_t offset;
{
	register vm_page_t mem;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	s = splhigh();
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		if ((mem->object == object) && (mem->offset == offset)) {
			splx(s);
			return (mem);
		}
	}

	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_offset)
	register vm_page_t mem;
	register vm_object_t new_object;
	vm_offset_t new_offset;
{
	int s;

	if (mem->object == new_object)
		return;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	splx(s);
}

/*
 * vm_page_unqueue must be called at splhigh();
 */
inline void
vm_page_unqueue(vm_page_t mem)
{
	int origflags;

	origflags = mem->flags;

	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
		return;

	if (origflags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count--;
		mem->flags &= ~PG_ACTIVE;
	} else if (origflags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		cnt.v_inactive_count--;
		mem->flags &= ~PG_INACTIVE;
	} else if (origflags & PG_CACHE) {
		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
		cnt.v_cache_count--;
		mem->flags &= ~PG_CACHE;
		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
			pagedaemon_wakeup();
	}
	return;
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	optionally OR'ed in:
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset, page_req)
	vm_object_t object;
	vm_offset_t offset;
	int page_req;
{
	register vm_page_t mem;
	int s;

#ifdef DIAGNOSTIC
	if (offset != trunc_page(offset))
		panic("vm_page_alloc: offset not page aligned");

#if 0
	mem = vm_page_lookup(object, offset);
	if (mem)
		panic("vm_page_alloc: page already allocated");
#endif
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splhigh();

	switch ((page_req & ~(VM_ALLOC_ZERO))) {
	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			if (page_req & VM_ALLOC_ZERO) {
				mem = vm_page_queue_zero.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				} else {
					mem = vm_page_queue_free.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				}
			} else {
				mem = vm_page_queue_free.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				} else {
					mem = vm_page_queue_zero.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				}
			}
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				mem->flags = PG_BUSY;
				cnt.v_cache_count--;
			} else {
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			if (page_req & VM_ALLOC_ZERO) {
				mem = vm_page_queue_zero.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				} else {
					mem = vm_page_queue_free.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				}
			} else {
				mem = vm_page_queue_free.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				} else {
					mem = vm_page_queue_zero.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				}
			}
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				mem->flags = PG_BUSY;
				cnt.v_cache_count--;
			} else {
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			mem = vm_page_queue_free.tqh_first;
			if (mem) {
				TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
				mem->flags = PG_BUSY;
			} else {
				mem = vm_page_queue_zero.tqh_first;
				TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
				mem->flags = PG_BUSY|PG_ZERO;
			}
			cnt.v_free_count--;
		} else {
			splx(s);
			pagedaemon_wakeup();
			return NULL;
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	mem->wire_count = 0;
	mem->hold_count = 0;
	mem->act_count = 0;
	mem->busy = 0;
	mem->valid = 0;
	mem->dirty = 0;
	mem->bmapped = 0;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(mem, object, offset);

	splx(s);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (mem);
}

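/*
 * Caller sketch (illustrative; not part of this file): a typical user
 * of vm_page_alloc() retries when no page can be had.  The VM_WAIT
 * sleep shown is an assumption about the caller's environment, not
 * something vm_page_alloc() does itself:
 *
 *	while ((m = vm_page_alloc(object, offset,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL)
 *		VM_WAIT;		(assumed sleep-for-pages primitive)
 *
 * On success the page is returned PG_BUSY, and PG_ZERO indicates
 * whether it came from the pre-zeroed queue.
 */
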
vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;

	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");

	start = 0;
	s = splhigh();
again:
	/*
	 * Find first page in array that is free, within range, and aligned.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check successive pages for contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM for it, unfree and assign the physical pages
	 * to it, and return the kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		cnt.v_free_count--;
		m->valid = VM_PAGE_BITS_ALL;
		m->flags = 0;
		m->dirty = 0;
		m->wire_count = 0;
		m->act_count = 0;
		m->bmapped = 0;
		m->busy = 0;
		vm_page_insert(m, kernel_object, tmp_addr - VM_MIN_KERNEL_ADDRESS);
		vm_page_wire(m);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return (addr);
}

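/*
 * Usage sketch (values assumed for illustration): an ISA DMA buffer
 * must be physically contiguous and below 16MB, so a driver might
 * request four contiguous pages below 16MB, aligned to 64KB:
 *
 *	buf = vm_page_alloc_contig((vm_offset_t)(4 * PAGE_SIZE),
 *	    (vm_offset_t)0, (vm_offset_t)(16 * 1024 * 1024),
 *	    (vm_offset_t)(64 * 1024));
 *
 * A return value of zero means no suitable run was found.
 */
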
/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t mem;
{
	int s;
	int flags;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_unqueue(mem);

	flags = mem->flags;
	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
		if (flags & PG_FREE)
			panic("vm_page_free: freeing free page");
		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
		    mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
		panic("vm_page_free: freeing busy page");
	}

	if ((flags & PG_WANTED) != 0)
		wakeup(mem);
	if ((flags & PG_FICTITIOUS) == 0) {
		if (mem->wire_count) {
			if (mem->wire_count > 1) {
				printf("vm_page_free: wire count > 1 (%d)\n", mem->wire_count);
				panic("vm_page_free: invalid wire count");
			}
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		mem->flags |= PG_FREE;
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
		splx(s);
		/*
		 * if pageout daemon needs pages, then tell it that there are
		 * some free.
		 */
		if (vm_pageout_pages_needed) {
			wakeup(&vm_pageout_pages_needed);
			vm_pageout_pages_needed = 0;
		}

		cnt.v_free_count++;
		/*
		 * Wake up processes that are waiting on memory when we hit
		 * the high water mark, and wake up the scheduler process
		 * (which will swap in processes) when we have lots of memory.
		 */
		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
			wakeup(&cnt.v_free_count);
			wakeup(&proc0);
		}
	} else {
		splx(s);
	}
	cnt.v_tfree++;
}


/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t mem;
{
	int s;

	if (mem->wire_count == 0) {
		s = splhigh();
		vm_page_unqueue(mem);
		splx(s);
		cnt.v_wire_count++;
	}
	mem->flags |= PG_WRITEABLE|PG_MAPPED;
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t mem;
{
	int s;

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splhigh();
	if (m->flags & PG_ACTIVE)
		panic("vm_page_activate: already active");

	if (m->flags & PG_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		if (m->act_count < 5)
			m->act_count = 5;
		else if (m->act_count < ACT_MAX)
			m->act_count += 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int spl;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
	    m->hold_count == 0) {
		if (m->flags & PG_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
		m->act_count = 0;
	}
	splx(spl);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
	    m->bmapped)
		return;

	s = splhigh();
	vm_page_unqueue(m);
	vm_page_protect(m, VM_PROT_NONE);

	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
	m->flags |= PG_CACHE;
	cnt.v_cache_count++;
	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
		wakeup(&cnt.v_free_count);
		wakeup(&proc0);
	}
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}


/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

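/*
 * Worked example (assuming DEV_BSIZE == 512 and PAGE_SIZE == 4096):
 * vm_page_bits(512, 1024) rounds size up to 1024, computes base chunk
 * 512 / 512 == 1, looks up vm_page_dev_bsize_chunks[1024 / 512] == 0x3,
 * and returns 0x3 << 1 == 0x6: the mask covering the second and third
 * DEV_BSIZE chunks of the page.
 */
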
/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) valid
 */
void
vm_page_set_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->valid |= vm_page_bits(base, size);
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}


/*
 * set a page (partially) dirty
 */
void
vm_page_set_dirty(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if ((base != 0) || (size != PAGE_SIZE)) {
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
			m->dirty = VM_PAGE_BITS_ALL;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			return;
		}
		m->dirty |= vm_page_bits(base, size);
	} else {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
}

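/*
 * latch the pmap modify bit into the page's dirty bits
 */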
void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * set a page (partially) clean
 */
void
vm_page_set_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->dirty &= ~vm_page_bits(base, size);
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * is (partial) page clean?
 */
int
vm_page_is_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
	if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
		return 1;
	else
		return 0;
}

#ifdef DDB
void
print_page_info()
{
	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}
#endif