vm_page.c revision 12778
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.41 1995/12/11 04:58:25 dyson Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

#ifdef DDB
extern void	print_page_info __P((void));
#endif

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist *vm_page_buckets;	/* Array of buckets */
int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */

struct pglist vm_page_queue_free;
struct pglist vm_page_queue_zero;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t vm_page_array;
int vm_page_array_size;
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
vm_size_t page_mask;
int page_shift;
int vm_page_zero_count;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

static inline __pure int
		vm_page_hash __P((vm_object_t object, vm_pindex_t pindex))
		__pure2;
static void	vm_page_unqueue __P((vm_page_t));

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

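	/*
	 * Page-align each chunk of physical memory described by
	 * phys_avail[]: round the start of each chunk up and its end down.
	 */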
	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

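	/*
	 * Find the largest chunk of physical memory (the boot-time
	 * allocations below are carved from its start), count the chunks,
	 * and total the available memory.
	 */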
	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_zero);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);
	TAILQ_INIT(&vm_page_queue_cache);

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = PG_FREE;
			m->phys_addr = pa;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
static inline __pure int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	return ((unsigned) object + pindex) & vm_page_hash_mask;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset hash
 *	table and the object's list of backed pages.
 *
 *	The object and page must be locked, and the caller must be at splhigh.
 */

inline void
vm_page_insert(mem, object, pindex)
	register vm_page_t mem;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct pglist *bucket;

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->pindex = pindex;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	TAILQ_INSERT_TAIL(bucket, mem, hashq);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

inline void
vm_page_remove(mem)
	register vm_page_t mem;
{
	register struct pglist *bucket;

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->pindex)];
	TAILQ_REMOVE(bucket, mem, hashq);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t mem;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];

	s = splhigh();
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		if ((mem->object == object) && (mem->pindex == pindex)) {
			splx(s);
			return (mem);
		}
	}

	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_pindex)
	register vm_page_t mem;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_pindex);
	splx(s);
}

/*
 * vm_page_unqueue removes a page from whichever paging queue it is on
 * and adjusts the corresponding counter; must be called at splhigh().
 */
static inline void
vm_page_unqueue(vm_page_t mem)
{
	int origflags;

	origflags = mem->flags;

	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
		return;

	if (origflags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count--;
		mem->flags &= ~PG_ACTIVE;
	} else if (origflags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		cnt.v_inactive_count--;
		mem->flags &= ~PG_INACTIVE;
	} else if (origflags & PG_CACHE) {
		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
		cnt.v_cache_count--;
		mem->flags &= ~PG_CACHE;
		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
			pagedaemon_wakeup();
	}
	return;
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	any of which may be or'ed with:
 *	VM_ALLOC_ZERO		prefer a pre-zeroed page
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t mem;
	int s;

#ifdef DIAGNOSTIC
	mem = vm_page_lookup(object, pindex);
	if (mem)
		panic("vm_page_alloc: page already allocated");
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splhigh();

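	/*
	 * The allocation class determines how far the free page count may
	 * be drawn down.  NORMAL and SYSTEM requests fall back to reclaiming
	 * a page from the cache queue once the free list reaches its
	 * reserve; INTERRUPT requests may take the free list all the way
	 * down to zero.
	 */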
	switch ((page_req & ~(VM_ALLOC_ZERO))) {
	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			if (page_req & VM_ALLOC_ZERO) {
				mem = vm_page_queue_zero.tqh_first;
				if (mem) {
					--vm_page_zero_count;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				} else {
					mem = vm_page_queue_free.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				}
			} else {
				mem = vm_page_queue_free.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				} else {
					--vm_page_zero_count;
					mem = vm_page_queue_zero.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				}
			}
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				mem->flags = PG_BUSY;
				cnt.v_cache_count--;
			} else {
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			if (page_req & VM_ALLOC_ZERO) {
				mem = vm_page_queue_zero.tqh_first;
				if (mem) {
					--vm_page_zero_count;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				} else {
					mem = vm_page_queue_free.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				}
			} else {
				mem = vm_page_queue_free.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				} else {
					--vm_page_zero_count;
					mem = vm_page_queue_zero.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				}
			}
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				mem->flags = PG_BUSY;
				cnt.v_cache_count--;
			} else {
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			mem = vm_page_queue_free.tqh_first;
			if (mem) {
				TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
				mem->flags = PG_BUSY;
			} else {
				--vm_page_zero_count;
				mem = vm_page_queue_zero.tqh_first;
				TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
				mem->flags = PG_BUSY|PG_ZERO;
			}
			cnt.v_free_count--;
		} else {
			splx(s);
			pagedaemon_wakeup();
			return NULL;
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	mem->wire_count = 0;
	mem->hold_count = 0;
	mem->act_count = 0;
	mem->busy = 0;
	mem->valid = 0;
	mem->dirty = 0;
	mem->bmapped = 0;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(mem, object, pindex);

	splx(s);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (mem);
}

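/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a physically contiguous, aligned run of pages whose
 *	addresses fall within [low, high), map it into kernel virtual
 *	memory, and return the kernel virtual address (or NULL on failure).
 */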
vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;

	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");

	start = 0;
	s = splhigh();
again:
	/*
	 * Find first page in array that is free, within range, and aligned.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check successive pages for contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, unfree and assign the physical pages to it and
	 * return the kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		cnt.v_free_count--;
		m->valid = VM_PAGE_BITS_ALL;
		m->flags = 0;
		m->dirty = 0;
		m->wire_count = 0;
		m->act_count = 0;
		m->bmapped = 0;
		m->busy = 0;
		vm_page_insert(m, kernel_object,
			OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
		vm_page_wire(m);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return (addr);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t mem;
{
	int s;
	int flags;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_unqueue(mem);

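	/*
	 * A page that is still mapped into a buffer, marked busy, or
	 * already free must never be returned to the free list.
	 */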
	flags = mem->flags;
	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
		if (flags & PG_FREE)
			panic("vm_page_free: freeing free page");
		printf("vm_page_free: pindex(%ld), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
		    mem->pindex, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
		panic("vm_page_free: freeing busy page");
	}

	if ((flags & PG_WANTED) != 0)
		wakeup(mem);
	if ((flags & PG_FICTITIOUS) == 0) {
		if (mem->wire_count) {
			if (mem->wire_count > 1) {
				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
				panic("vm_page_free: invalid wire count");
			}
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		mem->flags |= PG_FREE;
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
		splx(s);
		/*
		 * if pageout daemon needs pages, then tell it that there are
		 * some free.
		 */
		if (vm_pageout_pages_needed) {
			wakeup(&vm_pageout_pages_needed);
			vm_pageout_pages_needed = 0;
		}

		cnt.v_free_count++;
		/*
		 * Wake up processes that are waiting on memory if we hit a
		 * high-water mark, and wake up the scheduler process if we
		 * have lots of memory; that process will swap in processes.
		 */
		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
			wakeup(&cnt.v_free_count);
			wakeup(&proc0);
		}
	} else {
		splx(s);
	}
	cnt.v_tfree++;
}


/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t mem;
{
	int s;

	if (mem->wire_count == 0) {
		s = splhigh();
		vm_page_unqueue(mem);
		splx(s);
		cnt.v_wire_count++;
	}
	mem->flags |= PG_WRITEABLE|PG_MAPPED;
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t mem;
{
	int s;

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splhigh();
	if (m->flags & PG_ACTIVE)
		panic("vm_page_activate: already active");

	if (m->flags & PG_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		if (m->act_count < 5)
			m->act_count = 5;
		else if (m->act_count < ACT_MAX)
			m->act_count += 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int spl;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
	    m->hold_count == 0) {
		if (m->flags & PG_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
		m->act_count = 0;
	}
	splx(spl);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

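	/*
	 * Leave the page alone if it is busy, wired, mapped into a buffer,
	 * or already on the cache queue.
	 */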
	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
	    m->bmapped)
		return;

	s = splhigh();
	vm_page_unqueue(m);
	vm_page_protect(m, VM_PROT_NONE);

	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
	m->flags |= PG_CACHE;
	cnt.v_cache_count++;
	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
		wakeup(&cnt.v_free_count);
		wakeup(&proc0);
	}
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}


/*
 * Map a (base, size) byte range within a page to the corresponding mask of
 * DEV_BSIZE chunks, for use with a page's valid and dirty bit maps.
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}


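/*
 *	vm_page_test_dirty:
 *
 *	If the pmap layer reports the page as modified, mark all of its
 *	DEV_BSIZE chunks dirty.
 */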
void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

#ifdef DDB
void
print_page_info(void)
{
	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}
#endif