vm_page.c revision 10542
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37 *	$Id: vm_page.c,v 1.34 1995/07/20 05:28:07 davidg Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 *	Resident memory management module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/proc.h>
74
75#include <vm/vm.h>
76#include <vm/vm_kern.h>
77#include <vm/vm_page.h>
78#include <vm/vm_map.h>
79#include <vm/vm_pageout.h>
80
81/*
82 *	Associated with each page of user-allocatable memory is a
83 *	page structure.
84 */
85
86struct pglist *vm_page_buckets;	/* Array of buckets */
87int vm_page_bucket_count;	/* How big is array? */
88int vm_page_hash_mask;		/* Mask for hash function */
89
90struct pglist vm_page_queue_free;
91struct pglist vm_page_queue_active;
92struct pglist vm_page_queue_inactive;
93struct pglist vm_page_queue_cache;
94
95/* has physical page allocation been initialized? */
96boolean_t vm_page_startup_initialized;
97
98vm_page_t vm_page_array;
99int vm_page_array_size;
100long first_page;
101long last_page;
102vm_offset_t first_phys_addr;
103vm_offset_t last_phys_addr;
104vm_size_t page_mask;
105int page_shift;
106
107/*
108 * map of contiguous valid DEV_BSIZE chunks in a page
109 * (this list is valid for page sizes up to 16*DEV_BSIZE)
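 * (entry N of the table has its low N bits set: a mask of N chunks starting at chunk 0)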
110 */
111static u_short vm_page_dev_bsize_chunks[] = {
112	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
113	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
114};
115
116
117/*
118 *	vm_set_page_size:
119 *
120 *	Sets the page size, perhaps based upon the memory
121 *	size.  Must be called before any use of page-size
122 *	dependent functions.
123 *
124 *	Sets page_shift and page_mask from cnt.v_page_size.
125 */
126void
127vm_set_page_size()
128{
129
130	if (cnt.v_page_size == 0)
131		cnt.v_page_size = DEFAULT_PAGE_SIZE;
132	page_mask = cnt.v_page_size - 1;
133	if ((page_mask & cnt.v_page_size) != 0)
134		panic("vm_set_page_size: page size not a power of two");
135	for (page_shift = 0;; page_shift++)
136		if ((1 << page_shift) == cnt.v_page_size)
137			break;
138}
139
140/*
141 *	vm_page_startup:
142 *
143 *	Initializes the resident memory module.
144 *
145 *	Allocates memory for the page cells, and
146 *	for the object/offset-to-page hash table headers.
147 *	Each page cell is initialized and placed on the free list.
148 */
149
150vm_offset_t
151vm_page_startup(starta, enda, vaddr)
152	register vm_offset_t starta;
153	vm_offset_t enda;
154	register vm_offset_t vaddr;
155{
156	register vm_offset_t mapped;
157	register vm_page_t m;
158	register struct pglist *bucket;
159	vm_size_t npages, page_range;
160	register vm_offset_t new_start;
161	int i;
162	vm_offset_t pa;
163	int nblocks;
164	vm_offset_t first_managed_page;
165
166	/* the biggest memory array is the second group of pages */
167	vm_offset_t start;
168	vm_offset_t biggestone, biggestsize;
169
170	vm_offset_t total;
171
172	total = 0;
173	biggestsize = 0;
174	biggestone = 0;
175	nblocks = 0;
176	vaddr = round_page(vaddr);
177
178	for (i = 0; phys_avail[i + 1]; i += 2) {
179		phys_avail[i] = round_page(phys_avail[i]);
180		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
181	}
182
183	for (i = 0; phys_avail[i + 1]; i += 2) {
184		int size = phys_avail[i + 1] - phys_avail[i];
185
186		if (size > biggestsize) {
187			biggestone = i;
188			biggestsize = size;
189		}
190		++nblocks;
191		total += size;
192	}
193
194	start = phys_avail[biggestone];
195
196	/*
197	 * Initialize the queue headers for the free queue, the active queue,
198	 * the inactive queue and the cache queue.
199	 */
200
201	TAILQ_INIT(&vm_page_queue_free);
202	TAILQ_INIT(&vm_page_queue_active);
203	TAILQ_INIT(&vm_page_queue_inactive);
204	TAILQ_INIT(&vm_page_queue_cache);
205
206	/*
207	 * Allocate (and initialize) the hash table buckets.
208	 *
209	 * The number of buckets MUST BE a power of 2, and the actual value is
210	 * the next power of 2 greater than the number of physical pages in
211	 * the system.
212	 *
213	 * Note: This computation can be tweaked if desired.
214	 */
215	vm_page_buckets = (struct pglist *) vaddr;
216	bucket = vm_page_buckets;
217	if (vm_page_bucket_count == 0) {
218		vm_page_bucket_count = 1;
219		while (vm_page_bucket_count < atop(total))
220			vm_page_bucket_count <<= 1;
221	}
222	vm_page_hash_mask = vm_page_bucket_count - 1;
223
224	/*
225	 * Validate these addresses.
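	 *
	 * The bucket headers are carved out of physical memory at the start of
	 * the largest chunk (start .. new_start) and mapped at the current
	 * kernel virtual address; vaddr/mapped advance through virtual space
	 * while start/new_start advance through physical space.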
226	 */
227
228	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
229	new_start = round_page(new_start);
230	mapped = vaddr;
231	vaddr = pmap_map(mapped, start, new_start,
232	    VM_PROT_READ | VM_PROT_WRITE);
233	start = new_start;
234	bzero((caddr_t) mapped, vaddr - mapped);
235	mapped = vaddr;
236
237	for (i = 0; i < vm_page_bucket_count; i++) {
238		TAILQ_INIT(bucket);
239		bucket++;
240	}
241
242	/*
243	 * round (or truncate) the addresses to our page size.
244	 */
245
246	/*
247	 * Pre-allocate maps and map entries that cannot be dynamically
248	 * allocated via malloc().  The maps include the kernel_map and
249	 * kmem_map which must be initialized before malloc() will work
250	 * (obviously).  Also could include pager maps which would be
251	 * allocated before kmeminit.
252	 *
253	 * Allow some kernel map entries... this should be plenty since people
254	 * shouldn't be cluttering up the kernel map (they should use their
255	 * own maps).
256	 */
257
258	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
259	    MAX_KMAPENT * sizeof(struct vm_map_entry);
260	kentry_data_size = round_page(kentry_data_size);
261	kentry_data = (vm_offset_t) vaddr;
262	vaddr += kentry_data_size;
263
264	/*
265	 * Validate these zone addresses.
266	 */
267
268	new_start = start + (vaddr - mapped);
269	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
270	bzero((caddr_t) mapped, (vaddr - mapped));
271	start = round_page(new_start);
272
273	/*
274	 * Compute the number of pages of memory that will be available for
275	 * use (taking into account the overhead of a page structure per
276	 * page).
277	 */
278
279	first_page = phys_avail[0] / PAGE_SIZE;
280	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
281
282	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
283	npages = (total - (page_range * sizeof(struct vm_page)) -
284	    (start - phys_avail[biggestone])) / PAGE_SIZE;
285
286	/*
287	 * Initialize the mem entry structures now, and put them in the free
288	 * queue.
289	 */
290
291	vm_page_array = (vm_page_t) vaddr;
292	mapped = vaddr;
293
294
295	/*
296	 * Validate these addresses.
297	 */
298
299	new_start = round_page(start + page_range * sizeof(struct vm_page));
300	mapped = pmap_map(mapped, start, new_start,
301	    VM_PROT_READ | VM_PROT_WRITE);
302	start = new_start;
303
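	/*
	 * Everything below `start' in the largest chunk has been consumed by
	 * the boot-time allocations above; the remaining pages of that chunk
	 * are the first ones handed to the free list below.
	 */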
304	first_managed_page = start / PAGE_SIZE;
305
306	/*
307	 * Clear all of the page structures
308	 */
309	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
310	vm_page_array_size = page_range;
311
312	cnt.v_page_count = 0;
313	cnt.v_free_count = 0;
314	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
315		if (i == biggestone)
316			pa = ptoa(first_managed_page);
317		else
318			pa = phys_avail[i];
319		while (pa < phys_avail[i + 1] && npages-- > 0) {
320			++cnt.v_page_count;
321			++cnt.v_free_count;
322			m = PHYS_TO_VM_PAGE(pa);
323			m->flags = PG_FREE;
324			m->phys_addr = pa;
325			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
326			pa += PAGE_SIZE;
327		}
328	}
329
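	/*
	 * Return the first unused kernel virtual address following the
	 * mappings created above.
	 */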
330	return (mapped);
331}
332
333/*
334 *	vm_page_hash:
335 *
336 *	Distributes the object/offset key pair among hash buckets.
337 *
338 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
339 */
340inline int
341vm_page_hash(object, offset)
342	vm_object_t object;
343	vm_offset_t offset;
344{
345	return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
346}
347
348/*
349 *	vm_page_insert:		[ internal use only ]
350 *
351 *	Inserts the given mem entry into the object/offset-page
352 *	table and object list.
353 *
354 *	The object and page must be locked, and the caller must be at splhigh.
355 */
356
357inline void
358vm_page_insert(mem, object, offset)
359	register vm_page_t mem;
360	register vm_object_t object;
361	register vm_offset_t offset;
362{
363	register struct pglist *bucket;
364
365	if (mem->flags & PG_TABLED)
366		panic("vm_page_insert: already inserted");
367
368	/*
369	 * Record the object/offset pair in this page
370	 */
371
372	mem->object = object;
373	mem->offset = offset;
374
375	/*
376	 * Insert it into the object/offset hash table
377	 */
378
379	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
380	TAILQ_INSERT_TAIL(bucket, mem, hashq);
381
382	/*
383	 * Now link into the object's list of backed pages.
384	 */
385
386	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
387	mem->flags |= PG_TABLED;
388
389	/*
390	 * And show that the object has one more resident page.
391	 */
392
393	object->resident_page_count++;
394}
395
396/*
397 *	vm_page_remove:		[ internal use only ]
398 *				NOTE: used by device pager as well -wfj
399 *
400 *	Removes the given mem entry from the object/offset-page
401 *	table and the object page list.
402 *
403 *	The object and page must be locked, and at splhigh.
404 */
405
406inline void
407vm_page_remove(mem)
408	register vm_page_t mem;
409{
410	register struct pglist *bucket;
411
412	if (!(mem->flags & PG_TABLED))
413		return;
414
415	/*
416	 * Remove from the object/offset hash table
417	 */
418
419	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
420	TAILQ_REMOVE(bucket, mem, hashq);
421
422	/*
423	 * Now remove from the object's list of backed pages.
424	 */
425
426	TAILQ_REMOVE(&mem->object->memq, mem, listq);
427
428	/*
429	 * And show that the object has one fewer resident page.
430	 */
431
432	mem->object->resident_page_count--;
433
434	mem->flags &= ~PG_TABLED;
435}
436
437/*
438 *	vm_page_lookup:
439 *
440 *	Returns the page associated with the object/offset
441 *	pair specified; if none is found, NULL is returned.
442 *
443 *	The object must be locked.  No side effects.
444 */
445
446vm_page_t
447vm_page_lookup(object, offset)
448	register vm_object_t object;
449	register vm_offset_t offset;
450{
451	register vm_page_t mem;
452	register struct pglist *bucket;
453	int s;
454
455	/*
456	 * Search the hash table for this object/offset pair
457	 */
458
459	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
460
461	s = splhigh();
462	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
463		if ((mem->object == object) && (mem->offset == offset)) {
464			splx(s);
465			return (mem);
466		}
467	}
468
469	splx(s);
470	return (NULL);
471}
472
473/*
474 *	vm_page_rename:
475 *
476 *	Move the given memory entry from its
477 *	current object to the specified target object/offset.
478 *
479 *	The object must be locked.
480 */
481void
482vm_page_rename(mem, new_object, new_offset)
483	register vm_page_t mem;
484	register vm_object_t new_object;
485	vm_offset_t new_offset;
486{
487	int s;
488
489	if (mem->object == new_object)
490		return;
491
492	s = splhigh();
493	vm_page_remove(mem);
494	vm_page_insert(mem, new_object, new_offset);
495	splx(s);
496}
497
498/*
499 * vm_page_unqueue: remove the page from its paging queue, if any.  Must be called at splhigh().
500 */
501inline void
502vm_page_unqueue(vm_page_t mem)
503{
504	int origflags;
505
506	origflags = mem->flags;
507
508	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
509		return;
510
511	if (origflags & PG_ACTIVE) {
512		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
513		cnt.v_active_count--;
514		mem->flags &= ~PG_ACTIVE;
515	} else if (origflags & PG_INACTIVE) {
516		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
517		cnt.v_inactive_count--;
518		mem->flags &= ~PG_INACTIVE;
519	} else if (origflags & PG_CACHE) {
520		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
521		cnt.v_cache_count--;
522		mem->flags &= ~PG_CACHE;
523		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
524			pagedaemon_wakeup();
525	}
526	return;
527}
528
529/*
530 *	vm_page_alloc:
531 *
532 *	Allocate and return a memory cell associated
533 *	with this VM object/offset pair.
534 *
535 *	page_req classes:
536 *	VM_ALLOC_NORMAL		normal process request
537 *	VM_ALLOC_SYSTEM		system *really* needs a page
538 *	VM_ALLOC_INTERRUPT	interrupt time request
539 *
540 *	Object must be locked.
541 */
542vm_page_t
543vm_page_alloc(object, offset, page_req)
544	vm_object_t object;
545	vm_offset_t offset;
546	int page_req;
547{
548	register vm_page_t mem;
549	int s;
550
551#ifdef DIAGNOSTIC
552	if (offset != trunc_page(offset))
553		panic("vm_page_alloc: offset not page aligned");
554
555#if 0
556	mem = vm_page_lookup(object, offset);
557	if (mem)
558		panic("vm_page_alloc: page already allocated");
559#endif
560#endif
561
562	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
563		page_req = VM_ALLOC_SYSTEM;
564	}
565
566	s = splhigh();
567
568	mem = vm_page_queue_free.tqh_first;
569
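	/*
	 * Each allocation class has its own threshold: NORMAL requests fall
	 * back to the cache queue once the free count drops below
	 * v_free_reserved, SYSTEM requests may additionally dip down to
	 * v_interrupt_free_min when the cache is empty, and INTERRUPT
	 * requests may take the very last free page.
	 */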
570	switch (page_req) {
571	case VM_ALLOC_NORMAL:
572		if (cnt.v_free_count >= cnt.v_free_reserved) {
573			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
574			cnt.v_free_count--;
575		} else {
576			mem = vm_page_queue_cache.tqh_first;
577			if (mem != NULL) {
578				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
579				vm_page_remove(mem);
580				cnt.v_cache_count--;
581			} else {
582				splx(s);
583				pagedaemon_wakeup();
584				return (NULL);
585			}
586		}
587		break;
588
589	case VM_ALLOC_SYSTEM:
590		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
591		    ((cnt.v_cache_count == 0) &&
592		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
593			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
594			cnt.v_free_count--;
595		} else {
596			mem = vm_page_queue_cache.tqh_first;
597			if (mem != NULL) {
598				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
599				vm_page_remove(mem);
600				cnt.v_cache_count--;
601			} else {
602				splx(s);
603				pagedaemon_wakeup();
604				return (NULL);
605			}
606		}
607		break;
608
609	case VM_ALLOC_INTERRUPT:
610		if (mem != NULL) {
611			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
612			cnt.v_free_count--;
613		} else {
614			splx(s);
615			pagedaemon_wakeup();
616			return (NULL);
617		}
618		break;
619
620	default:
621		panic("vm_page_alloc: invalid allocation class");
622	}
623
624	mem->flags = PG_BUSY;
625	mem->wire_count = 0;
626	mem->hold_count = 0;
627	mem->act_count = 0;
628	mem->busy = 0;
629	mem->valid = 0;
630	mem->dirty = 0;
631	mem->bmapped = 0;
632
633	/* XXX before splx until vm_page_insert is safe */
634	vm_page_insert(mem, object, offset);
635
636	splx(s);
637
638	/*
639	 * Don't wake up the pageout daemon too often - only wake it when
640	 * we would be nearly out of memory.
641	 */
642	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
643	    (cnt.v_free_count < cnt.v_pageout_free_min))
644		pagedaemon_wakeup();
645
646	return (mem);
647}
648
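/*
 *	vm_page_alloc_contig:
 *
 *	Allocate `size' bytes of physically contiguous memory whose physical
 *	addresses lie in the range [low, high) and whose start is aligned to
 *	`alignment' (a power of 2).  The pages found are removed from the
 *	free list, wired, entered into kernel_object and mapped into the
 *	kernel.  Returns the kernel virtual address of the region, or NULL
 *	if no suitable run of free pages exists.
 */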
649vm_offset_t
650vm_page_alloc_contig(size, low, high, alignment)
651	vm_offset_t size;
652	vm_offset_t low;
653	vm_offset_t high;
654	vm_offset_t alignment;
655{
656	int i, s, start;
657	vm_offset_t addr, phys, tmp_addr;
658	vm_page_t pga = vm_page_array;
659
660	if ((alignment & (alignment - 1)) != 0)
661		panic("vm_page_alloc_contig: alignment must be a power of 2");
662
663	start = 0;
664	s = splhigh();
665again:
666	/*
667	 * Find first page in array that is free, within range, and aligned.
668	 */
669	for (i = start; i < cnt.v_page_count; i++) {
670		phys = VM_PAGE_TO_PHYS(&pga[i]);
671		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
672		    (phys >= low) && (phys < high) &&
673		    ((phys & (alignment - 1)) == 0))
674			break;
675	}
676
677	/*
678	 * If the above failed or we will exceed the upper bound, fail.
679	 */
680	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
681		splx(s);
682		return (NULL);
683	}
684	start = i;
685
686	/*
687	 * Check successive pages for contiguous and free.
688	 */
689	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
690		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
691			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
692		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
693			start++;
694			goto again;
695		}
696	}
697
698	/*
699	 * We've found a contiguous chunk that meets our requirements.
700	 * Allocate kernel VM, remove the physical pages from the free list,
701	 * assign them to the new kernel VM region and return its address.
702	 */
703	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
704
705	for (i = start; i < (start + size / PAGE_SIZE); i++) {
706		vm_page_t m = &pga[i];
707
708		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
709		cnt.v_free_count--;
710		m->valid = VM_PAGE_BITS_ALL;
711		m->flags = 0;
712		m->dirty = 0;
713		m->wire_count = 0;
714		m->act_count = 0;
715		m->bmapped = 0;
716		m->busy = 0;
717		vm_page_insert(m, kernel_object, tmp_addr - VM_MIN_KERNEL_ADDRESS);
718		vm_page_wire(m);
719		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
720		tmp_addr += PAGE_SIZE;
721	}
722
723	splx(s);
724	return (addr);
725}
726
727/*
728 *	vm_page_free:
729 *
730 *	Returns the given page to the free list,
731 *	disassociating it from any VM object.
732 *
733 *	Object and page must be locked prior to entry.
734 */
735void
736vm_page_free(mem)
737	register vm_page_t mem;
738{
739	int s;
740	int flags;
741
742	s = splhigh();
743	vm_page_remove(mem);
744	vm_page_unqueue(mem);
745
746	flags = mem->flags;
747	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
748		if (flags & PG_FREE)
749			panic("vm_page_free: freeing free page");
750		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
751		    mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
752		panic("vm_page_free: freeing busy page");
753	}
754
755	if ((flags & PG_WANTED) != 0)
756		wakeup(mem);
757	if ((flags & PG_FICTITIOUS) == 0) {
758		if (mem->wire_count) {
759			if (mem->wire_count > 1) {
760				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
761				panic("vm_page_free: invalid wire count");
762			}
763			cnt.v_wire_count--;
764			mem->wire_count = 0;
765		}
766		mem->flags |= PG_FREE;
767		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
768		splx(s);
769		/*
770		 * If the pageout daemon is waiting for pages, tell it that
771		 * some are now free.
772		 */
773		if (vm_pageout_pages_needed) {
774			wakeup(&vm_pageout_pages_needed);
775			vm_pageout_pages_needed = 0;
776		}
777
778		cnt.v_free_count++;
779		/*
780		 * Wake up processes that are waiting on memory if we hit a
781		 * high water mark, and wake up the scheduler process if we
782		 * have lots of memory; it will swap in processes.
783		 */
784		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
785			wakeup(&cnt.v_free_count);
786			wakeup(&proc0);
787		}
788	} else {
789		splx(s);
790	}
791	cnt.v_tfree++;
792}
793
794
795/*
796 *	vm_page_wire:
797 *
798 *	Mark this page as wired down by yet
799 *	another map, removing it from paging queues
800 *	as necessary.
801 *
802 *	The page queues must be locked.
803 */
804void
805vm_page_wire(mem)
806	register vm_page_t mem;
807{
808	int s;
809
810	if (mem->wire_count == 0) {
811		s = splhigh();
812		vm_page_unqueue(mem);
813		splx(s);
814		cnt.v_wire_count++;
815	}
816	mem->flags |= PG_WRITEABLE|PG_MAPPED;
817	mem->wire_count++;
818}
819
820/*
821 *	vm_page_unwire:
822 *
823 *	Release one wiring of this page, potentially
824 *	enabling it to be paged again.
825 *
826 *	The page queues must be locked.
827 */
828void
829vm_page_unwire(mem)
830	register vm_page_t mem;
831{
832	int s;
833
834	s = splhigh();
835
836	if (mem->wire_count)
837		mem->wire_count--;
838	if (mem->wire_count == 0) {
839		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
840		cnt.v_active_count++;
841		mem->flags |= PG_ACTIVE;
842		cnt.v_wire_count--;
843	}
844	splx(s);
845}
846
847/*
848 *	vm_page_activate:
849 *
850 *	Put the specified page on the active list (if appropriate).
851 *
852 *	The page queues must be locked.
853 */
854void
855vm_page_activate(m)
856	register vm_page_t m;
857{
858	int s;
859
860	s = splhigh();
861	if (m->flags & PG_ACTIVE)
862		panic("vm_page_activate: already active");
863
864	if (m->flags & PG_CACHE)
865		cnt.v_reactivated++;
866
867	vm_page_unqueue(m);
868
869	if (m->wire_count == 0) {
870		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
871		m->flags |= PG_ACTIVE;
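		/* boost the activity count to at least 5, then let it age up toward ACT_MAX */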
872		if (m->act_count < 5)
873			m->act_count = 5;
874		else if (m->act_count < ACT_MAX)
875			m->act_count += 1;
876		cnt.v_active_count++;
877	}
878	splx(s);
879}
880
881/*
882 *	vm_page_deactivate:
883 *
884 *	Returns the given page to the inactive list,
885 *	indicating that no physical maps have access
886 *	to this page.  [Used by the physical mapping system.]
887 *
888 *	The page queues must be locked.
889 */
890void
891vm_page_deactivate(m)
892	register vm_page_t m;
893{
894	int spl;
895
896	/*
897	 * Only move active pages -- ignore locked or already inactive ones.
898	 *
899	 * XXX: sometimes we get pages which aren't wired down or on any queue -
900	 * we need to put them on the inactive queue also, otherwise we lose
901	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
902	 */
903
904	spl = splhigh();
905	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
906	    m->hold_count == 0) {
907		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
908		if (m->flags & PG_CACHE)
909			cnt.v_reactivated++;
910		vm_page_unqueue(m);
911		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
912		m->flags |= PG_INACTIVE;
913		cnt.v_inactive_count++;
914		m->act_count = 0;
915	}
916	splx(spl);
917}
918
919/*
920 * vm_page_cache
921 *
922 * Put the specified page onto the page cache queue (if appropriate).
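 *
 * Cached pages keep their object/offset identity, so they can be
 * reactivated cheaply; vm_page_alloc() takes pages back off the cache
 * queue when the free list runs short (see above).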
923 */
924void
925vm_page_cache(m)
926	register vm_page_t m;
927{
928	int s;
929
930	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
931	    m->bmapped)
932		return;
933
934	s = splhigh();
935	vm_page_unqueue(m);
936	vm_page_protect(m, VM_PROT_NONE);
937
938	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
939	m->flags |= PG_CACHE;
940	cnt.v_cache_count++;
941	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
942		wakeup(&cnt.v_free_count);
943		wakeup(&proc0);
944	}
945	if (vm_pageout_pages_needed) {
946		wakeup(&vm_pageout_pages_needed);
947		vm_pageout_pages_needed = 0;
948	}
949
950	splx(s);
951}
952
953/*
954 *	vm_page_zero_fill:
955 *
956 *	Zero-fill the specified page.
957 *	Written as a standard pagein routine, to
958 *	be used by the zero-fill object.
959 */
960boolean_t
961vm_page_zero_fill(m)
962	vm_page_t m;
963{
964	pmap_zero_page(VM_PAGE_TO_PHYS(m));
965	m->valid = VM_PAGE_BITS_ALL;
966	return (TRUE);
967}
968
969/*
970 *	vm_page_copy:
971 *
972 *	Copy one page to another
973 */
974void
975vm_page_copy(src_m, dest_m)
976	vm_page_t src_m;
977	vm_page_t dest_m;
978{
979	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
980	dest_m->valid = VM_PAGE_BITS_ALL;
981}
982
983
984/*
985 * map a (base, size) byte range within a page to the bit mask of the
986 * DEV_BSIZE chunks it covers; used for both the valid and dirty maps
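 *
 * e.g. with PAGE_SIZE 4096 and DEV_BSIZE 512 (the i386 defaults),
 * vm_page_bits(512, 1024) covers chunks 1 and 2 and returns 0x06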
987 */
988inline int
989vm_page_bits(int base, int size)
990{
991	u_short chunk;
992
993	if ((base == 0) && (size >= PAGE_SIZE))
994		return VM_PAGE_BITS_ALL;
995	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
996	base = (base % PAGE_SIZE) / DEV_BSIZE;
997	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
998	return (chunk << base) & VM_PAGE_BITS_ALL;
999}
1000
1001/*
1002 * set a page valid and clean
1003 */
1004void
1005vm_page_set_validclean(m, base, size)
1006	vm_page_t m;
1007	int base;
1008	int size;
1009{
1010	int pagebits = vm_page_bits(base, size);
1011	m->valid |= pagebits;
1012	m->dirty &= ~pagebits;
1013	if (base == 0 && size == PAGE_SIZE)
1014		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1015}
1016
1017/*
1018 * set a page (partially) valid
1019 */
1020void
1021vm_page_set_valid(m, base, size)
1022	vm_page_t m;
1023	int base;
1024	int size;
1025{
1026	m->valid |= vm_page_bits(base, size);
1027}
1028
1029/*
1030 * set a page (partially) invalid
1031 */
1032void
1033vm_page_set_invalid(m, base, size)
1034	vm_page_t m;
1035	int base;
1036	int size;
1037{
1038	int bits;
1039
1040	m->valid &= ~(bits = vm_page_bits(base, size));
1041	if (m->valid == 0)
1042		m->dirty &= ~bits;
1043}
1044
1045/*
1046 * is (partial) page valid?
1047 */
1048int
1049vm_page_is_valid(m, base, size)
1050	vm_page_t m;
1051	int base;
1052	int size;
1053{
1054	int bits = vm_page_bits(base, size);
1055
1056	if (m->valid && ((m->valid & bits) == bits))
1057		return 1;
1058	else
1059		return 0;
1060}
1061
1062
1063/*
1064 * set a page (partially) dirty
1065 */
1066void
1067vm_page_set_dirty(m, base, size)
1068	vm_page_t m;
1069	int base;
1070	int size;
1071{
1072	if ((base != 0) || (size != PAGE_SIZE)) {
1073		if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1074			m->dirty = VM_PAGE_BITS_ALL;
1075			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1076			return;
1077		}
1078		m->dirty |= vm_page_bits(base, size);
1079	} else {
1080		m->dirty = VM_PAGE_BITS_ALL;
1081		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1082	}
1083}
1084
1085void
1086vm_page_test_dirty(m)
1087	vm_page_t m;
1088{
1089	if ((m->dirty != VM_PAGE_BITS_ALL) &&
1090	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1091		m->dirty = VM_PAGE_BITS_ALL;
1092	}
1093}
1094
1095/*
1096 * set a page (partially) clean
1097 */
1098void
1099vm_page_set_clean(m, base, size)
1100	vm_page_t m;
1101	int base;
1102	int size;
1103{
1104	m->dirty &= ~vm_page_bits(base, size);
1105	if (base == 0 && size == PAGE_SIZE)
1106		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1107}
1108
1109/*
1110 * is (partial) page clean?
1111 */
1112int
1113vm_page_is_clean(m, base, size)
1114	vm_page_t m;
1115	int base;
1116	int size;
1117{
1118	if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
1119		m->dirty = VM_PAGE_BITS_ALL;
1120		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1121	}
1122	if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
1123		return 1;
1124	else
1125		return 0;
1126}
1127
1128#ifdef DDB
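/*
 * dump the VM page counters; handy when poking around with the DDB
 * kernel debugger
 */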
1129void
1130print_page_info()
1131{
1132	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1133	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1134	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1135	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1136	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1137	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1138	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1139	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1140	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1141	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1142}
1143#endif
1144