vm_page.c revision 5464
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37 *	$Id: vm_page.c,v 1.13 1995/01/09 16:05:51 davidg Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 *	Resident memory management module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/proc.h>
74
75#include <vm/vm.h>
76#include <vm/vm_page.h>
77#include <vm/vm_map.h>
78#include <vm/vm_pageout.h>
79
80/*
 *	Associated with each page of user-allocatable memory is
 *	a page structure.
83 */
84
85struct pglist *vm_page_buckets;	/* Array of buckets */
86int vm_page_bucket_count = 0;	/* How big is array? */
87int vm_page_hash_mask;		/* Mask for hash function */
88simple_lock_data_t bucket_lock;	/* lock for all buckets XXX */
89
90struct pglist vm_page_queue_free;
91struct pglist vm_page_queue_active;
92struct pglist vm_page_queue_inactive;
93struct pglist vm_page_queue_cache;
94simple_lock_data_t vm_page_queue_lock;
95simple_lock_data_t vm_page_queue_free_lock;
96
97/* has physical page allocation been initialized? */
98boolean_t vm_page_startup_initialized;
99
100vm_page_t vm_page_array;
101int vm_page_array_size;
102long first_page;
103long last_page;
104vm_offset_t first_phys_addr;
105vm_offset_t last_phys_addr;
106vm_size_t page_mask;
107int page_shift;
108
109/*
110 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this table is valid for page sizes up to 16*DEV_BSIZE)
112 */
113static u_short vm_page_dev_bsize_chunks[] = {
114	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
115	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
116};
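
/*
 * Each entry i of the table above is simply (1 << i) - 1, i.e. a mask with
 * the low-order i bits set.  vm_page_bits() below selects the entry for
 * size / DEV_BSIZE and shifts it into place to cover that many contiguous
 * DEV_BSIZE chunks of the page.
 */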
117
118
119/*
120 *	vm_set_page_size:
121 *
122 *	Sets the page size, perhaps based upon the memory
123 *	size.  Must be called before any use of page-size
124 *	dependent functions.
125 *
126 *	Sets page_shift and page_mask from cnt.v_page_size.
127 */
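/*
 *	For example, with the common 4K page size this leaves
 *	cnt.v_page_size at 4096, page_mask at 0xfff and page_shift at 12.
 */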
128void
129vm_set_page_size()
130{
131
132	if (cnt.v_page_size == 0)
133		cnt.v_page_size = DEFAULT_PAGE_SIZE;
134	page_mask = cnt.v_page_size - 1;
135	if ((page_mask & cnt.v_page_size) != 0)
136		panic("vm_set_page_size: page size not a power of two");
137	for (page_shift = 0;; page_shift++)
138		if ((1 << page_shift) == cnt.v_page_size)
139			break;
140}
141
142/*
143 *	vm_page_startup:
144 *
145 *	Initializes the resident memory module.
146 *
147 *	Allocates memory for the page cells, and
148 *	for the object/offset-to-page hash table headers.
149 *	Each page cell is initialized and placed on the free list.
150 */
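/*
 *	The bootstrap data allocated here (the hash table buckets, the
 *	statically allocated kernel map entries and the vm_page array
 *	itself) is carved out of the start of the largest block of free
 *	physical memory and mapped at the virtual addresses starting at
 *	"vaddr".
 */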
151
152vm_offset_t
153vm_page_startup(starta, enda, vaddr)
154	register vm_offset_t starta;
155	vm_offset_t enda;
156	register vm_offset_t vaddr;
157{
158	register vm_offset_t mapped;
159	register vm_page_t m;
160	register struct pglist *bucket;
161	vm_size_t npages, page_range;
162	register vm_offset_t new_start;
163	int i;
164	vm_offset_t pa;
165	int nblocks;
166	vm_offset_t first_managed_page;
167
168	extern vm_offset_t kentry_data;
169	extern vm_size_t kentry_data_size;
170	extern vm_offset_t phys_avail[];
171
	/*
	 * The bootstrap structures are allocated out of the largest
	 * block of available physical memory.
	 */
173	vm_offset_t start;
174	vm_offset_t biggestone, biggestsize;
175
176	vm_offset_t total;
177
178	total = 0;
179	biggestsize = 0;
180	biggestone = 0;
181	nblocks = 0;
182	vaddr = round_page(vaddr);
183
184	for (i = 0; phys_avail[i + 1]; i += 2) {
185		phys_avail[i] = round_page(phys_avail[i]);
186		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
187	}
188
189	for (i = 0; phys_avail[i + 1]; i += 2) {
190		int size = phys_avail[i + 1] - phys_avail[i];
191
192		if (size > biggestsize) {
193			biggestone = i;
194			biggestsize = size;
195		}
196		++nblocks;
197		total += size;
198	}
199
200	start = phys_avail[biggestone];
201
202
203	/*
204	 * Initialize the locks
205	 */
206
207	simple_lock_init(&vm_page_queue_free_lock);
208	simple_lock_init(&vm_page_queue_lock);
209
210	/*
211	 * Initialize the queue headers for the free queue, the active queue
212	 * and the inactive queue.
213	 */
214
215	TAILQ_INIT(&vm_page_queue_free);
216	TAILQ_INIT(&vm_page_queue_active);
217	TAILQ_INIT(&vm_page_queue_inactive);
218	TAILQ_INIT(&vm_page_queue_cache);
219
220	/*
221	 * Allocate (and initialize) the hash table buckets.
222	 *
223	 * The number of buckets MUST BE a power of 2, and the actual value is
 * the smallest power of 2 greater than or equal to the number of
 * physical pages in the system.
226	 *
227	 * Note: This computation can be tweaked if desired.
228	 */
229	vm_page_buckets = (struct pglist *) vaddr;
230	bucket = vm_page_buckets;
231	if (vm_page_bucket_count == 0) {
232		vm_page_bucket_count = 1;
233		while (vm_page_bucket_count < atop(total))
234			vm_page_bucket_count <<= 1;
235	}
236	vm_page_hash_mask = vm_page_bucket_count - 1;
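
	/*
	 * For example, on a machine with 16MB of available physical memory
	 * and 4K pages, atop(total) is 4096, so vm_page_bucket_count ends
	 * up as 4096 and vm_page_hash_mask as 0xfff.
	 */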
237
238	/*
239	 * Validate these addresses.
240	 */
241
242	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
243	new_start = round_page(new_start);
244	mapped = vaddr;
245	vaddr = pmap_map(mapped, start, new_start,
246	    VM_PROT_READ | VM_PROT_WRITE);
247	start = new_start;
248	bzero((caddr_t) mapped, vaddr - mapped);
249	mapped = vaddr;
250
251	for (i = 0; i < vm_page_bucket_count; i++) {
252		TAILQ_INIT(bucket);
253		bucket++;
254	}
255
256	simple_lock_init(&bucket_lock);
257
258	/*
259	 * round (or truncate) the addresses to our page size.
260	 */
261
262	/*
263	 * Pre-allocate maps and map entries that cannot be dynamically
264	 * allocated via malloc().  The maps include the kernel_map and
265	 * kmem_map which must be initialized before malloc() will work
266	 * (obviously).  Also could include pager maps which would be
267	 * allocated before kmeminit.
268	 *
269	 * Allow some kernel map entries... this should be plenty since people
270	 * shouldn't be cluttering up the kernel map (they should use their
271	 * own maps).
272	 */
273
274	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
275	    MAX_KMAPENT * sizeof(struct vm_map_entry);
276	kentry_data_size = round_page(kentry_data_size);
277	kentry_data = (vm_offset_t) vaddr;
278	vaddr += kentry_data_size;
279
280	/*
281	 * Validate these zone addresses.
282	 */
283
284	new_start = start + (vaddr - mapped);
285	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
286	bzero((caddr_t) mapped, (vaddr - mapped));
287	start = round_page(new_start);
288
289	/*
290	 * Compute the number of pages of memory that will be available for
291	 * use (taking into account the overhead of a page structure per
292	 * page).
293	 */
294
295	first_page = phys_avail[0] / PAGE_SIZE;
296
297	/* for VM_PAGE_CHECK() */
298	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
299
300	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
301	npages = (total - (page_range * sizeof(struct vm_page)) -
302	    (start - phys_avail[biggestone])) / PAGE_SIZE;
303
304	/*
305	 * Initialize the mem entry structures now, and put them in the free
306	 * queue.
307	 */
308
309	vm_page_array = (vm_page_t) vaddr;
310	mapped = vaddr;
311
312
313	/*
314	 * Validate these addresses.
315	 */
316
317	new_start = round_page(start + page_range * sizeof(struct vm_page));
318	mapped = pmap_map(mapped, start, new_start,
319	    VM_PROT_READ | VM_PROT_WRITE);
320	start = new_start;
321
322	first_managed_page = start / PAGE_SIZE;
323
324	/*
325	 * Clear all of the page structures
326	 */
327	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
328	vm_page_array_size = page_range;
329
330	cnt.v_page_count = 0;
331	cnt.v_free_count = 0;
332	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
333		if (i == biggestone)
334			pa = ptoa(first_managed_page);
335		else
336			pa = phys_avail[i];
337		while (pa < phys_avail[i + 1] && npages-- > 0) {
338			++cnt.v_page_count;
339			++cnt.v_free_count;
340			m = PHYS_TO_VM_PAGE(pa);
341			m->flags = PG_FREE;
342			vm_page_set_clean(m, 0, PAGE_SIZE);
343			m->object = 0;
344			m->phys_addr = pa;
345			m->hold_count = 0;
346			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
347			pa += PAGE_SIZE;
348		}
349	}
350
351	/*
	 * Initialize the vm_pages_needed lock here rather than waiting
	 * for the pageout daemon to do it.	XXX
354	 */
355	simple_lock_init(&vm_pages_needed_lock);
356
357	return (mapped);
358}
359
360/*
361 *	vm_page_hash:
362 *
363 *	Distributes the object/offset key pair among hash buckets.
364 *
365 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
366 */
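/*
 *	The bucket index is formed from the object pointer plus the page
 *	index within the object (offset / NBPG), masked down to the table
 *	size, so consecutive pages of an object tend to hash to consecutive
 *	buckets.
 */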
inline int
368vm_page_hash(object, offset)
369	vm_object_t object;
370	vm_offset_t offset;
371{
372	return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
373}
374
375/*
376 *	vm_page_insert:		[ internal use only ]
377 *
378 *	Inserts the given mem entry into the object/object-page
379 *	table and object list.
380 *
381 *	The object and page must be locked.
382 */
383
384void
385vm_page_insert(mem, object, offset)
386	register vm_page_t mem;
387	register vm_object_t object;
388	register vm_offset_t offset;
389{
390	register struct pglist *bucket;
391	int s;
392
393	VM_PAGE_CHECK(mem);
394
395	if (mem->flags & PG_TABLED)
396		panic("vm_page_insert: already inserted");
397
398	/*
399	 * Record the object/offset pair in this page
400	 */
401
402	mem->object = object;
403	mem->offset = offset;
404
405	/*
406	 * Insert it into the object_object/offset hash table
407	 */
408
409	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
410	s = splhigh();
411	simple_lock(&bucket_lock);
412	TAILQ_INSERT_TAIL(bucket, mem, hashq);
413	simple_unlock(&bucket_lock);
414	(void) splx(s);
415
416	/*
417	 * Now link into the object's list of backed pages.
418	 */
419
420	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
421	mem->flags |= PG_TABLED;
422
423	/*
424	 * And show that the object has one more resident page.
425	 */
426
427	object->resident_page_count++;
428}
429
430/*
431 *	vm_page_remove:		[ internal use only ]
432 *				NOTE: used by device pager as well -wfj
433 *
434 *	Removes the given mem entry from the object/offset-page
435 *	table and the object page list.
436 *
437 *	The object and page must be locked.
438 */
439
440void
441vm_page_remove(mem)
442	register vm_page_t mem;
443{
444	register struct pglist *bucket;
445	int s;
446
447	VM_PAGE_CHECK(mem);
448
449
450	if (!(mem->flags & PG_TABLED))
451		return;
452
453	/*
454	 * Remove from the object_object/offset hash table
455	 */
456
457	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
458	s = splhigh();
459	simple_lock(&bucket_lock);
460	TAILQ_REMOVE(bucket, mem, hashq);
461	simple_unlock(&bucket_lock);
462	(void) splx(s);
463
464	/*
465	 * Now remove from the object's list of backed pages.
466	 */
467
468	TAILQ_REMOVE(&mem->object->memq, mem, listq);
469
470	/*
471	 * And show that the object has one fewer resident page.
472	 */
473
474	mem->object->resident_page_count--;
475
476	mem->flags &= ~PG_TABLED;
477}
478
479/*
480 *	vm_page_lookup:
481 *
482 *	Returns the page associated with the object/offset
483 *	pair specified; if none is found, NULL is returned.
484 *
485 *	The object must be locked.  No side effects.
486 */
487
488vm_page_t
489vm_page_lookup(object, offset)
490	register vm_object_t object;
491	register vm_offset_t offset;
492{
493	register vm_page_t mem;
494	register struct pglist *bucket;
495	int s;
496
497	/*
498	 * Search the hash table for this object/offset pair
499	 */
500
501	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
502
503	s = splhigh();
504	simple_lock(&bucket_lock);
505	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
506		VM_PAGE_CHECK(mem);
507		if ((mem->object == object) && (mem->offset == offset)) {
508			simple_unlock(&bucket_lock);
509			splx(s);
510			return (mem);
511		}
512	}
513
514	simple_unlock(&bucket_lock);
515	splx(s);
516	return (NULL);
517}
518
519/*
520 *	vm_page_rename:
521 *
522 *	Move the given memory entry from its
523 *	current object to the specified target object/offset.
524 *
525 *	The object must be locked.
526 */
527void
528vm_page_rename(mem, new_object, new_offset)
529	register vm_page_t mem;
530	register vm_object_t new_object;
531	vm_offset_t new_offset;
532{
533	int s;
534
535	if (mem->object == new_object)
536		return;
537
538	vm_page_lock_queues(); /* keep page from moving out from under pageout daemon */
539	s = splhigh();
540	vm_page_remove(mem);
541	vm_page_insert(mem, new_object, new_offset);
542	splx(s);
543	vm_page_unlock_queues();
544}
545
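/*
 *	vm_page_unqueue:
 *
 *	Remove the given page from whichever paging queue (active, inactive
 *	or cache) it is currently on, and return the page's previous queue
 *	flags (see vm_page_requeue() below).
 */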
546int
547vm_page_unqueue(vm_page_t mem)
548{
549	int s, origflags;
550
551	s = splhigh();
552	origflags = mem->flags;
553	if (mem->flags & PG_ACTIVE) {
554		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
555		cnt.v_active_count--;
556		mem->flags &= ~PG_ACTIVE;
557	} else if (mem->flags & PG_INACTIVE) {
558		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
559		cnt.v_inactive_count--;
560		mem->flags &= ~PG_INACTIVE;
561	} else if (mem->flags & PG_CACHE) {
562		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
563		cnt.v_cache_count--;
564		mem->flags &= ~PG_CACHE;
565		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
566			wakeup((caddr_t) &vm_pages_needed);
567	}
568	splx(s);
569	return origflags;
570}
571
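/*
 *	vm_page_requeue:
 *
 *	Put the given page back on the paging queue selected by "flags"
 *	(typically the value returned by vm_page_unqueue()) and move it to
 *	the tail of its object's page list.  Wired pages are not requeued.
 */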
572void
573vm_page_requeue(vm_page_t mem, int flags)
574{
575	int s;
576
577	if (mem->wire_count)
578		return;
579	s = splhigh();
580	if (flags & PG_CACHE) {
581		TAILQ_INSERT_TAIL(&vm_page_queue_cache, mem, pageq);
582		mem->flags |= PG_CACHE;
583		cnt.v_cache_count++;
584	} else if (flags & PG_ACTIVE) {
585		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
586		mem->flags |= PG_ACTIVE;
587		cnt.v_active_count++;
588	} else if (flags & PG_INACTIVE) {
589		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, mem, pageq);
590		mem->flags |= PG_INACTIVE;
591		cnt.v_inactive_count++;
592	}
593	TAILQ_REMOVE(&mem->object->memq, mem, listq);
594	TAILQ_INSERT_TAIL(&mem->object->memq, mem, listq);
595	splx(s);
596}
597
598/*
599 *	vm_page_alloc:
600 *
601 *	Allocate and return a memory cell associated
602 *	with this VM object/offset pair.
603 *
604 *	Object must be locked.
605 */
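/*
 *	If "inttime" is non-zero the caller is presumably running at
 *	interrupt time and cannot wait on an object lock, so only pages on
 *	the free queue, or cache pages whose objects are not interlocked,
 *	are taken.
 */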
606vm_page_t
607vm_page_alloc(object, offset, inttime)
608	vm_object_t object;
609	vm_offset_t offset;
610	int inttime;
611{
612	register vm_page_t mem;
613	int s;
614
615	simple_lock(&vm_page_queue_free_lock);
616
617	s = splhigh();
618
619	if (object != kernel_object &&
620	    object != kmem_object &&
621	    curproc != pageproc &&
622	    curproc != &proc0 &&
623	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
624		simple_unlock(&vm_page_queue_free_lock);
625		splx(s);
626		return (NULL);
627	}
628	if (inttime) {
629		if ((mem = vm_page_queue_free.tqh_first) == 0) {
630			for (mem = vm_page_queue_cache.tqh_first; mem; mem = mem->pageq.tqe_next) {
631				if ((mem->object->flags & OBJ_ILOCKED) == 0) {
632					TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
633					vm_page_remove(mem);
634					cnt.v_cache_count--;
635					goto gotpage;
636				}
637			}
			simple_unlock(&vm_page_queue_free_lock);
			splx(s);
			return (NULL);
640		}
641	} else {
642		if ((cnt.v_free_count < 3) ||
643		    (mem = vm_page_queue_free.tqh_first) == 0) {
644			mem = vm_page_queue_cache.tqh_first;
645			if (mem) {
646				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
647				vm_page_remove(mem);
648				cnt.v_cache_count--;
649				goto gotpage;
650			}
651			simple_unlock(&vm_page_queue_free_lock);
652			splx(s);
653			/* wakeup((caddr_t) &vm_pages_needed); */
654			return (NULL);
655		}
656	}
657
658	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
659	cnt.v_free_count--;
660
661gotpage:
662	simple_unlock(&vm_page_queue_free_lock);
663
664	VM_PAGE_INIT(mem, object, offset);
665	splx(s);
666
	/*
	 * Don't wake the pageout daemon too often: only wake it when we
	 * would otherwise be nearly out of memory.
	 */
671	if (curproc != pageproc &&
672	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min))
673		wakeup((caddr_t) &vm_pages_needed);
674
675	return (mem);
676}
677
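/*
 *	vm_page_alloc_contig:
 *
 *	Allocate "size" bytes of physically contiguous, wired memory whose
 *	physical addresses lie in [low, high) and whose start is aligned to
 *	"alignment" (a power of 2), map it into kernel virtual memory and
 *	return the kernel virtual address (or 0 if no such range is free).
 *	For example, a driver needing a 64K ISA DMA buffer below 16MB might
 *	ask for vm_page_alloc_contig(64*1024, 0, 16*1024*1024, 64*1024).
 */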
678vm_offset_t
679vm_page_alloc_contig(size, low, high, alignment)
680	vm_offset_t size;
681	vm_offset_t low;
682	vm_offset_t high;
683	vm_offset_t alignment;
684{
685	int i, s, start;
686	vm_offset_t addr, phys, tmp_addr;
687	vm_page_t pga = vm_page_array;
688	extern vm_map_t kernel_map;
689
690	if ((alignment & (alignment - 1)) != 0)
691		panic("vm_page_alloc_contig: alignment must be a power of 2");
692
693	start = 0;
694	s = splhigh();
695again:
696	/*
697	 * Find first page in array that is free, within range, and aligned.
698	 */
699	for (i = start; i < cnt.v_page_count; i++) {
700		phys = VM_PAGE_TO_PHYS(&pga[i]);
701		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
702		    (phys >= low) && (phys < high) &&
703		    ((phys & (alignment - 1)) == 0))
704			break;
705	}
706
707	/*
708	 * If the above failed or we will exceed the upper bound, fail.
709	 */
710	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
711		splx(s);
712		return (NULL);
713	}
714	start = i;
715
716	/*
717	 * Check successive pages for contiguous and free.
718	 */
719	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
720		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
721			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
722		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
723			start++;
724			goto again;
725		}
726	}
727
728	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, remove the pages from the free list, wire
	 * and map them, and return the kernel virtual address.
732	 */
733	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
734
735	for (i = start; i < (start + size / PAGE_SIZE); i++) {
736		TAILQ_REMOVE(&vm_page_queue_free, &pga[i], pageq);
737		cnt.v_free_count--;
738		vm_page_wire(&pga[i]);
739		vm_page_set_clean(&pga[i], 0, PAGE_SIZE);
740		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(&pga[i]));
741		tmp_addr += PAGE_SIZE;
742	}
743
744	splx(s);
745	return (addr);
746}
747
748/*
749 *	vm_page_free:
750 *
751 *	Returns the given page to the free list,
752 *	disassociating it with any VM object.
753 *
754 *	Object and page must be locked prior to entry.
755 */
756void
757vm_page_free(mem)
758	register vm_page_t mem;
759{
760	int s;
761
762	s = splhigh();
763	vm_page_remove(mem);
764	vm_page_unqueue(mem);
765
766	if (mem->bmapped || mem->busy || mem->flags & PG_BUSY) {
767		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
768		    mem->offset, mem->bmapped, mem->busy, (mem->flags & PG_BUSY) ? 1 : 0);
		panic("vm_page_free: freeing busy page");
770	}
771	if (mem->flags & PG_FREE)
772		panic("vm_page_free: freeing free page");
773
774	if (!(mem->flags & PG_FICTITIOUS)) {
775
776		simple_lock(&vm_page_queue_free_lock);
777		if (mem->wire_count) {
778			if (mem->wire_count > 1) {
779				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
780				panic("vm_page_free: invalid wire count");
781			}
782			cnt.v_wire_count--;
783			mem->wire_count = 0;
784		}
785		mem->flags |= PG_FREE;
786		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
787
788		cnt.v_free_count++;
789		simple_unlock(&vm_page_queue_free_lock);
790		splx(s);
791		/*
792		 * if pageout daemon needs pages, then tell it that there are
793		 * some free.
794		 */
795		if (vm_pageout_pages_needed)
796			wakeup((caddr_t) &vm_pageout_pages_needed);
797
798		/*
		 * Wake up processes that are waiting on memory when free
		 * memory reaches the v_free_min threshold, and wake up the
		 * scheduler process (proc0) so that it can swap processes
		 * back in.
802		 */
803		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
804			wakeup((caddr_t) &cnt.v_free_count);
805			wakeup((caddr_t) &proc0);
806		}
807	} else {
808		splx(s);
809	}
810	if (mem->flags & PG_WANTED)
811		wakeup((caddr_t) mem);
812	cnt.v_tfree++;
813}
814
815
816/*
817 *	vm_page_wire:
818 *
819 *	Mark this page as wired down by yet
820 *	another map, removing it from paging queues
821 *	as necessary.
822 *
823 *	The page queues must be locked.
824 */
825void
826vm_page_wire(mem)
827	register vm_page_t mem;
828{
829	int s;
830
831	VM_PAGE_CHECK(mem);
832
833	if (mem->wire_count == 0) {
834		vm_page_unqueue(mem);
835		cnt.v_wire_count++;
836	}
837	mem->wire_count++;
838}
839
840/*
841 *	vm_page_unwire:
842 *
843 *	Release one wiring of this page, potentially
844 *	enabling it to be paged again.
845 *
846 *	The page queues must be locked.
847 */
848void
849vm_page_unwire(mem)
850	register vm_page_t mem;
851{
852	int s;
853
854	VM_PAGE_CHECK(mem);
855
856	s = splhigh();
857
858	if (mem->wire_count)
859		mem->wire_count--;
860	if (mem->wire_count == 0) {
861		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
862		cnt.v_active_count++;
863		mem->flags |= PG_ACTIVE;
864		cnt.v_wire_count--;
865	}
866	splx(s);
867}
868
869/*
870 *	vm_page_deactivate:
871 *
872 *	Returns the given page to the inactive list,
873 *	indicating that no physical maps have access
874 *	to this page.  [Used by the physical mapping system.]
875 *
876 *	The page queues must be locked.
877 */
878void
879vm_page_deactivate(m)
880	register vm_page_t m;
881{
882	int spl;
883
884	VM_PAGE_CHECK(m);
885
886	/*
887	 * Only move active pages -- ignore locked or already inactive ones.
888	 *
889	 * XXX: sometimes we get pages which aren't wired down or on any queue -
890	 * we need to put them on the inactive queue also, otherwise we lose
891	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
892	 */
893
894	spl = splhigh();
895	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
896	    m->hold_count == 0) {
897		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
898		vm_page_unqueue(m);
899		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
900		m->flags |= PG_INACTIVE;
901		cnt.v_inactive_count++;
902		m->act_count = 0;
903	}
904	splx(spl);
905}
906
907/*
908 * vm_page_cache
909 *
910 * Put the specified page onto the page cache queue (if appropriate).
911 */
912
913void
914vm_page_cache(m)
915	register vm_page_t m;
916{
917	int s;
918
919	VM_PAGE_CHECK(m);
920	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
921	    m->bmapped)
922		return;
923
924	s = splhigh();
925	vm_page_unqueue(m);
926	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
927
928	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
929	m->flags |= PG_CACHE;
930	cnt.v_cache_count++;
931	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
932		wakeup((caddr_t) &cnt.v_free_count);
933		wakeup((caddr_t) &proc0);
934	}
935	if (vm_pageout_pages_needed)
936		wakeup((caddr_t) &vm_pageout_pages_needed);
937
938	splx(s);
939}
940
941/*
942 *	vm_page_activate:
943 *
944 *	Put the specified page on the active list (if appropriate).
945 *
946 *	The page queues must be locked.
947 */
948
949void
950vm_page_activate(m)
951	register vm_page_t m;
952{
953	int s;
954
955	VM_PAGE_CHECK(m);
956
957	s = splhigh();
958	if (m->flags & PG_ACTIVE)
959		panic("vm_page_activate: already active");
960
961	vm_page_unqueue(m);
962
963	if (m->wire_count == 0) {
964		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
965		m->flags |= PG_ACTIVE;
966		TAILQ_REMOVE(&m->object->memq, m, listq);
967		TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
968		if (m->act_count < 5)
969			m->act_count = 5;
970		else
971			m->act_count += 1;
972		cnt.v_active_count++;
973	}
974	splx(s);
975}
976
977/*
978 *	vm_page_zero_fill:
979 *
980 *	Zero-fill the specified page.
981 *	Written as a standard pagein routine, to
982 *	be used by the zero-fill object.
983 */
984
985boolean_t
986vm_page_zero_fill(m)
987	vm_page_t m;
988{
989	VM_PAGE_CHECK(m);
990
991	pmap_zero_page(VM_PAGE_TO_PHYS(m));
992	m->valid = VM_PAGE_BITS_ALL;
993	return (TRUE);
994}
995
996/*
997 *	vm_page_copy:
998 *
999 *	Copy one page to another
1000 */
1001void
1002vm_page_copy(src_m, dest_m)
1003	vm_page_t src_m;
1004	vm_page_t dest_m;
1005{
1006	VM_PAGE_CHECK(src_m);
1007	VM_PAGE_CHECK(dest_m);
1008
1009	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
1010	dest_m->valid = VM_PAGE_BITS_ALL;
1011}
1012
1013
1014/*
 * Map a (base, size) byte range within a page to the corresponding mask
 * of DEV_BSIZE chunk bits; used for both the valid and the dirty maps.
1017 */
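/*
 * For example, assuming DEV_BSIZE is 512 and PAGE_SIZE is 4096,
 * vm_page_bits(1024, 1024) computes a base chunk of 2 and a two-chunk
 * mask of 0x3, returning 0x3 << 2 = 0xc, i.e. the third and fourth
 * DEV_BSIZE chunks of the page.
 */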
1018inline int
1019vm_page_bits(int base, int size)
1020{
1021	u_short chunk;
1022
1023	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1024	base = (base % PAGE_SIZE) / DEV_BSIZE;
1025	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
1026	return (chunk << base) & VM_PAGE_BITS_ALL;
1027}
1028
1029/*
1030 * set a page (partially) valid
1031 */
1032void
1033vm_page_set_valid(m, base, size)
1034	vm_page_t m;
1035	int base;
1036	int size;
1037{
1038	m->valid |= vm_page_bits(base, size);
1039}
1040
1041/*
1042 * set a page (partially) invalid
1043 */
1044void
1045vm_page_set_invalid(m, base, size)
1046	vm_page_t m;
1047	int base;
1048	int size;
1049{
1050	int bits;
1051
1052	m->valid &= ~(bits = vm_page_bits(base, size));
1053	if (m->valid == 0)
1054		m->dirty &= ~bits;
1055}
1056
1057/*
1058 * is (partial) page valid?
1059 */
1060int
1061vm_page_is_valid(m, base, size)
1062	vm_page_t m;
1063	int base;
1064	int size;
1065{
1066	int bits;
1067
1068	if (m->valid && ((m->valid & (bits = vm_page_bits(base, size))) == bits))
1069		return 1;
1070	else
1071		return 0;
1072}
1073
1074
1075/*
1076 * set a page (partially) dirty
1077 */
1078void
1079vm_page_set_dirty(m, base, size)
1080	vm_page_t m;
1081	int base;
1082	int size;
1083{
1084	if ((base != 0) || (size != PAGE_SIZE)) {
1085		if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1086			m->dirty = VM_PAGE_BITS_ALL;
1087			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1088			return;
1089		}
1090		m->dirty |= vm_page_bits(base, size);
1091	} else {
1092		m->dirty = VM_PAGE_BITS_ALL;
1093		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1094	}
1095}
1096
1097void
1098vm_page_test_dirty(m)
1099	vm_page_t m;
1100{
1101	if ((!m->dirty || (m->dirty != vm_page_bits(0, PAGE_SIZE))) &&
1102	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1103		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1104		m->dirty = VM_PAGE_BITS_ALL;
1105	}
1106}
1107
1108/*
1109 * set a page (partially) clean
1110 */
1111void
1112vm_page_set_clean(m, base, size)
1113	vm_page_t m;
1114	int base;
1115	int size;
1116{
1117	m->dirty &= ~vm_page_bits(base, size);
1118}
1119
1120/*
 * is (partial) page clean?
1122 */
1123int
1124vm_page_is_clean(m, base, size)
1125	vm_page_t m;
1126	int base;
1127	int size;
1128{
1129	if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1130		m->dirty = VM_PAGE_BITS_ALL;
1131		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1132	}
1133	if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
1134		return 1;
1135	else
1136		return 0;
1137}
1138
1139void
1140print_page_info()
1141{
1142	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1143	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1144	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1145	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1146	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1147	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1148	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1149	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1150	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1151	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1152}
1153