/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.4 1994/08/07 13:10:41 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist	*vm_page_buckets;		/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;		/* lock for all buckets XXX */

struct pglist	vm_page_queue_free;
struct pglist	vm_page_queue_active;
struct pglist	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t	vm_page_array;
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
vm_size_t	page_mask;
int		page_shift;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
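
/*
 * For example, with the common cnt.v_page_size of 4096 the loop above
 * leaves page_shift == 12 and page_mask == 0xfff, so an address splits
 * into a page number (addr >> page_shift) and an offset within the page
 * (addr & page_mask).
 */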

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t	starta;
	vm_offset_t	enda;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register struct pglist *bucket;
	vm_size_t		npages, page_range;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;
	int nblocks;
	vm_offset_t		first_managed_page;
	int			size;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;
	extern vm_offset_t phys_avail[];
/* the biggest chunk of physical memory is usually the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i+1] = trunc_page(phys_avail[i+1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i+1] - phys_avail[i];
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the smallest power of 2 greater
	 *	than or equal to the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
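
	/*
	 * For example, with 16MB of managed memory and 4K pages atop(total)
	 * is 4096, so the loop above leaves vm_page_bucket_count at 4096 and
	 * vm_page_hash_mask at 0xfff; a page count that is not a power of
	 * two (say 3000) is rounded up to 4096 the same way.
	 */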

	/*
	 *	Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	round (or truncate) the addresses to our page size.
	 */

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	npages = (total - (start - phys_avail[biggestone])) / (PAGE_SIZE + sizeof(struct vm_page));
	first_page = phys_avail[0] / PAGE_SIZE;

	page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof (struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 *	Clear all of the page structures
	 */
	bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = 0;
			m->object = 0;
			m->phys_addr = pa;
			m->hold_count = 0;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}
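
/*
 * A rough, illustrative walk-through of the layout built above (numbers
 * are hypothetical): with 4K pages and a largest phys_avail chunk of 16MB,
 * the hash buckets (4096 * sizeof(struct pglist)), the kentry area
 * (MAX_KMAP maps plus MAX_KMAPENT map entries), and the vm_page_array are
 * carved from the front of that chunk and mapped at the supplied 'vaddr'
 * with pmap_map(); every page left over in every chunk is then put on
 * vm_page_queue_free.
 */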

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This routine depends on vm_page_bucket_count being a power of 2.
 */
inline const int
vm_page_hash(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
}
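
/*
 * Since vm_page_bucket_count is a power of two, masking with
 * vm_page_hash_mask is the same as reducing the sum modulo the bucket
 * count.  For example, with 4096 buckets (mask 0xfff), an object pointer
 * of 0xf0123400 and an offset three pages into the object (offset/NBPG
 * == 3) select bucket (0xf0123400 + 3) & 0xfff == 0x403.
 */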

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */

void vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register struct pglist	*bucket;
	int			s;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	s = splhigh();
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void vm_page_remove(mem)
	register vm_page_t	mem;
{
	register struct pglist	*bucket;
	int			s;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	s = splhigh();
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(s);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register struct pglist	*bucket;
	int			s;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	s = splhigh();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(s);
			return(mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(s);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int		s;

	s = splhigh();
	simple_lock(&vm_page_queue_free_lock);
	if (object != kernel_object &&
		object != kmem_object &&
		curproc != pageproc && curproc != &proc0 &&
		cnt.v_free_count < cnt.v_free_reserved) {

		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * this wakeup seems unnecessary, but there is code that
		 * might just check to see if there are free pages, and
		 * punt if there aren't.  VM_WAIT does this too, but
		 * redundant wakeups aren't that bad...
		 */
		if (curproc != pageproc)
			wakeup((caddr_t) &vm_pages_needed);
		return(NULL);
	}
	if ((mem = vm_page_queue_free.tqh_first) == 0) {
		simple_unlock(&vm_page_queue_free_lock);
		printf("No pages???\n");
		splx(s);
		/*
		 * comment above re: wakeups applies here too...
		 */
		if (curproc != pageproc)
			wakeup((caddr_t) &vm_pages_needed);
		return(NULL);
	}

	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);

	VM_PAGE_INIT(mem, object, offset);
	splx(s);

/*
 * Don't wake the pageout daemon too often: only wake it when we
 * are nearly out of memory.
 */
	if (curproc != pageproc &&
		(cnt.v_free_count < cnt.v_free_reserved))
		wakeup((caddr_t) &vm_pages_needed);

	return(mem);
}
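
/*
 * A minimal usage sketch (not lifted from any particular caller): with the
 * object locked, a fault handler or pager typically looks for an existing
 * page first and allocates only if none is resident, sleeping via VM_WAIT
 * (see vm_pageout.h) when the free list is exhausted:
 *
 *	while ((m = vm_page_lookup(object, offset)) == NULL) {
 *		m = vm_page_alloc(object, offset);
 *		if (m != NULL)
 *			break;
 *		vm_object_unlock(object);
 *		VM_WAIT;
 *		vm_object_lock(object);
 *	}
 *
 * The unlock/relock around VM_WAIT is illustrative; real callers such as
 * vm_fault() restart their whole lookup after sleeping.
 */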

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void vm_page_free(mem)
	register vm_page_t	mem;
{
	int s;
	s = splhigh();
	vm_page_remove(mem);
	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {

		simple_lock(&vm_page_queue_free_lock);
		if (mem->wire_count) {
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * if the pageout daemon needs pages, then tell it that there
		 * are some free.
		 */
		if (vm_pageout_pages_needed)
			wakeup((caddr_t)&vm_pageout_pages_needed);

		/*
		 * wakeup processes that are waiting on memory if we
		 * hit a high water mark.
		 */
		if (cnt.v_free_count == cnt.v_free_min) {
			wakeup((caddr_t)&cnt.v_free_count);
		}

		/*
		 * wakeup the scheduler process if we have lots of memory.
		 * this process will swapin processes.
		 */
		if (cnt.v_free_count == cnt.v_free_target) {
			wakeup((caddr_t)&proc0);
		}
	} else {
		splx(s);
	}
	wakeup((caddr_t) mem);
}
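
/*
 * Expected calling convention, sketched (illustrative only): a caller
 * tearing down an object's resident pages, for instance
 * vm_object_page_remove(), holds the object lock and brackets each free
 * with the page queue lock:
 *
 *	vm_page_lock_queues();
 *	vm_page_free(p);
 *	vm_page_unlock_queues();
 *
 * vm_page_free() raises the priority level and takes the free-list lock
 * itself, so the caller only supplies the object and queue locking.
 */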

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void vm_page_wire(mem)
	register vm_page_t	mem;
{
	int s;
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		s = splhigh();
		if (mem->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
			cnt.v_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
			cnt.v_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		splx(s);
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void vm_page_unwire(mem)
	register vm_page_t	mem;
{
	int s;
	VM_PAGE_CHECK(mem);

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}
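
/*
 * Illustrative only: callers that pin a page (for I/O or a wired mapping)
 * bracket the wire/unwire pair with the page queue lock, matching the
 * "page queues must be locked" requirement above:
 *
 *	vm_page_lock_queues();
 *	vm_page_wire(m);
 *	vm_page_unlock_queues();
 *		(use the page)
 *	vm_page_lock_queues();
 *	vm_page_unwire(m);
 *	vm_page_unlock_queues();
 */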

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t	m;
{
	int spl;
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 *
	 *	XXX: sometimes we get pages which aren't wired down
	 *	or on any queue - we need to put them on the inactive
	 *	queue also, otherwise we lose track of them.
	 *	Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
		m->hold_count == 0) {

		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		if (m->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			m->flags &= ~PG_ACTIVE;
			cnt.v_active_count--;
		}
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
#define NOT_DEACTIVATE_PROTECTS
#ifndef NOT_DEACTIVATE_PROTECTS
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
#else
		if ((m->flags & PG_CLEAN) &&
			pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
#endif
		if ((m->flags & PG_CLEAN) == 0)
			m->flags |= PG_LAUNDRY;
	}
	splx(spl);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void vm_page_activate(m)
	register vm_page_t	m;
{
	int s;
	VM_PAGE_CHECK(m);

	s = splhigh();
	if (m->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
		cnt.v_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		TAILQ_REMOVE(&m->object->memq, m, listq);
		TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
		m->act_count = 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t
vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}