/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.9 1994/09/27 20:49:02 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

struct pglist	vm_page_queue_free;
struct pglist	vm_page_queue_active;
struct pglist	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
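	/* Compute page_shift: the base-2 log of the (power-of-2) page size. */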
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t	starta;
	vm_offset_t	enda;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register struct pglist *bucket;
	vm_size_t		npages, page_range;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;
	int nblocks;
	vm_offset_t		first_managed_page;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;
	extern vm_offset_t phys_avail[];
/* find the largest block of available physical memory; the boot-time VM structures are carved from it */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i+1] = trunc_page(phys_avail[i+1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i+1] - phys_avail[i];
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}
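	/*
	 *	Everything allocated below (hash buckets, kernel map entries,
	 *	vm_page_array) is carved from the start of the largest block.
	 */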

	start = phys_avail[biggestone];


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}


	vm_page_hash_mask = vm_page_bucket_count - 1;
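	/*
	 *	Because the bucket count is a power of 2, ANDing with the mask
	 *	is equivalent to taking the hash value modulo the bucket count.
	 */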

	/*
	 *	Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	round (or truncate) the addresses to our page size.
	 */

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	npages = (total - (start - phys_avail[biggestone])) / (PAGE_SIZE + sizeof(struct vm_page));
	first_page = phys_avail[0] / PAGE_SIZE;

	page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;
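	/*
	 *	page_range spans every page frame from the first available
	 *	address to the end of the last block, holes included, so
	 *	vm_page_array gets one entry per frame in that span.
	 */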
	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;


	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof (struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;
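	/*
	 *	Frames of the largest block below first_managed_page now hold
	 *	the hash buckets, the kentry area and vm_page_array itself;
	 *	only frames from here up are placed on the free list below.
	 */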

	/*
	 *	Clear all of the page structures
	 */
	bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = PG_CLEAN | PG_FREE;
			m->object = 0;
			m->phys_addr = pa;
			m->hold_count = 0;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}
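	/*
	 *	cnt.v_page_count and cnt.v_free_count now reflect every managed
	 *	frame that was placed on the free queue.
	 */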

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
inline const int
vm_page_hash(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset hash
 *	table and the object's list of resident pages.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register struct pglist	*bucket;
	int			s;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	s = splhigh();
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register struct pglist	*bucket;
	int			s;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	s = splhigh();
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register struct pglist	*bucket;
	int			s;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	s = splhigh();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(s);
			return(mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(s);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		s;

	s = splhigh();
	simple_lock(&vm_page_queue_free_lock);
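	/*
	 *	Ordinary processes may not dip into the free-page reserve; it is
	 *	kept for the kernel and the pageout daemon so that memory can
	 *	always be reclaimed.
	 */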
	if (	object != kernel_object &&
		object != kmem_object	&&
		curproc != pageproc && curproc != &proc0 &&
		cnt.v_free_count < cnt.v_free_reserved) {

		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * this wakeup seems unnecessary, but there is code that
		 * might just check to see if there are free pages, and
		 * punt if there aren't.  VM_WAIT does this too, but
		 * redundant wakeups aren't that bad...
		 */
		if (curproc != pageproc)
			wakeup((caddr_t) &vm_pages_needed);
		return(NULL);
	}
	if ((mem = vm_page_queue_free.tqh_first) == 0) {
		simple_unlock(&vm_page_queue_free_lock);
		printf("No pages???\n");
		splx(s);
		/*
		 * comment above re: wakeups applies here too...
		 */
		if (curproc != pageproc)
			wakeup((caddr_t) &vm_pages_needed);
		return(NULL);
	}

	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);

	VM_PAGE_INIT(mem, object, offset);
	splx(s);

/*
 * Don't wake the pageout daemon too often: only do so when we are
 * nearly out of free memory.
 */
	if (curproc != pageproc &&
		(cnt.v_free_count < cnt.v_free_reserved))
		wakeup((caddr_t) &vm_pages_needed);

	return(mem);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t	size;
	vm_offset_t	low;
	vm_offset_t	high;
	vm_offset_t	alignment;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;
	extern vm_map_t kernel_map;

	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");
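	/*
	 *	Scan vm_page_array linearly: find a free page that is aligned
	 *	and within [low, high), then check that the pages after it are
	 *	physically contiguous and free; on any gap, resume the search
	 *	one page beyond the previous starting point.
	 */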

	start = 0;
	s = splhigh();
again:
	/*
	 * Find first page in array that is free, within range, and aligned.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}

	start = i;

	/*
	 * Check successive pages for contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
		    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, take the physical pages off the free list,
	 * assign them to that VM and return its kernel address.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
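	/*
	 *	kmem_alloc_pageable() returns kernel VA with no pages behind it;
	 *	the chosen physical pages are wired and entered by hand below.
	 */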

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		TAILQ_REMOVE(&vm_page_queue_free, &pga[i], pageq);
		cnt.v_free_count--;
		vm_page_wire(&pga[i]);
		pga[i].flags = PG_CLEAN; /* shut off PG_FREE and any other flags */
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(&pga[i]));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return (addr);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	int s;
	s = splhigh();
	vm_page_remove(mem);
	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}
	if (mem->flags & PG_FREE)
		panic("vm_page_free: freeing free page");

	if (!(mem->flags & PG_FICTITIOUS)) {
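		/*
		 *	Only real pages are returned to the free queue;
		 *	fictitious pages (such as device-pager pages) are
		 *	never put there.
		 */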

		simple_lock(&vm_page_queue_free_lock);
		if (mem->wire_count) {
			if (mem->wire_count > 1) {
				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
				panic("vm_page_free: invalid wire count");
			}
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}

		mem->flags |= PG_FREE;
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * if pageout daemon needs pages, then tell it that there
		 * are some free.
		 */
		if (vm_pageout_pages_needed)
			wakeup((caddr_t)&vm_pageout_pages_needed);

		/*
		 * wakeup processes that are waiting on memory if we
		 * hit a high water mark.
		 */
		if (cnt.v_free_count == cnt.v_free_min) {
			wakeup((caddr_t)&cnt.v_free_count);
		}

		/*
		 * wakeup scheduler process if we have lots of memory.
		 * this process will swapin processes.
		 */
		if (cnt.v_free_count == cnt.v_free_target) {
			wakeup((caddr_t)&proc0);
		}
	} else {
		splx(s);
	}
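	/* Finally, wake any thread that is sleeping on this page's address. */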
	wakeup((caddr_t) mem);
}


/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	int s;
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		s = splhigh();
		if (mem->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
			cnt.v_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
			cnt.v_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		splx(s);
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	int s;
	VM_PAGE_CHECK(mem);

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
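	/* Dropping the last wiring puts the page back on the active queue. */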
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t	m;
{
	int spl;
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 *
	 *	XXX: sometimes we get pages which aren't wired down
	 *	or on any queue - we need to put them on the inactive
	 *	queue also, otherwise we lose track of them.
	 *	Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
		m->hold_count == 0) {

		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		if (m->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			m->flags &= ~PG_ACTIVE;
			cnt.v_active_count--;
		}
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
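		/*
		 *	With NOT_DEACTIVATE_PROTECTS defined, existing mappings
		 *	are left intact; instead the modify bit is checked so a
		 *	page dirtied through an old mapping is not treated as
		 *	clean.
		 */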
#define NOT_DEACTIVATE_PROTECTS
#ifndef NOT_DEACTIVATE_PROTECTS
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
#else
		if ((m->flags & PG_CLEAN) &&
			pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
#endif
		if ((m->flags & PG_CLEAN) == 0)
			m->flags |= PG_LAUNDRY;
	}
	splx(spl);
}
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	int s;
	VM_PAGE_CHECK(m);

	s = splhigh();
	if (m->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
		cnt.v_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
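		/* Requeue the page at the tail of its object's resident-page list. */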
		TAILQ_REMOVE(&m->object->memq, m, listq);
		TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
		m->act_count = 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t
vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
