/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_page.h 60755 2000-05-21 12:50:18Z peter $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#if !defined(KLD_MODULE)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>
#include <machine/atomic.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	struct vm_page	*hnext;		/* hash table link (O,P) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_offset_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	flags,			/* see below */
		pc;			/* page color */
	u_short wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal x86 kernels, they must be at least 8 bits wide */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
};
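
/*
 * Illustrative sketch (not compiled): with PAGE_SIZE == 4096 and
 * DEV_BSIZE == 512 the valid/dirty maps hold one bit per 512-byte
 * chunk, eight bits in all.  vm_page_bits() (declared below,
 * implemented in vm_page.c) builds the mask covering a byte range,
 * e.g.:
 *
 *	m->valid |= vm_page_bits(0, 1024);	// chunks 0 and 1, mask 0x03
 *	if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
 *		// the entire page is valid
 *
 * The exact rounding of partial chunks is up to vm_page_bits() itself.
 */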

/*
 * note: SWAPBLK_NONE is currently used as an absolute value rather than
 * a flag bit.
 */

#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */

#if !defined(KLD_MODULE)

/*
 * Page coloring parameters
 */
/* Each of PQ_FREE and PQ_CACHE has PQ_L2_SIZE entries */

/* Define at most one of the following cache size options */
#if defined(PQ_HUGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors optimized for a 1MB cache */
#endif

#if defined(PQ_LARGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors optimized for a 512KB cache */
#endif


/*
 * Use 'options PQ_NOOPT' to disable page coloring
 */
#if defined(PQ_NOOPT)
#define PQ_PRIME1 1
#define PQ_PRIME2 1
#define PQ_L2_SIZE 1
#endif

#if defined(PQ_NORMALCACHE)
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (optimized for a 64KB cache) */
#endif

#if defined(PQ_MEDIUMCACHE)
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors optimized for a 256KB cache */
#endif

#if !defined(PQ_L2_SIZE)
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 32	/* 512KB or smaller, 4-way set-associative cache */
#endif

#define PQ_L2_MASK (PQ_L2_SIZE - 1)

#if 1
#define PQ_NONE 0
#define PQ_FREE	1
#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE)
#define PQ_CACHE (3 + 1*PQ_L2_SIZE)
#define PQ_COUNT (3 + 2*PQ_L2_SIZE)
#else
#define PQ_NONE		PQ_COUNT
#define PQ_FREE		0
#define PQ_INACTIVE	PQ_L2_SIZE
#define PQ_ACTIVE	(1 +   PQ_L2_SIZE)
#define PQ_CACHE	(2 +   PQ_L2_SIZE)
#define PQ_COUNT	(2 + 2*PQ_L2_SIZE)
#endif

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
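
/*
 * Illustrative sketch (not compiled): the colored queues are laid out as
 * PQ_L2_SIZE consecutive entries starting at PQ_FREE (and likewise at
 * PQ_CACHE), so a particular color's free list can be reached as, e.g.:
 *
 *	struct vpgqueues *vpq;
 *
 *	vpq = &vm_page_queues[PQ_FREE + (color & PQ_L2_MASK)];
 *	m = TAILQ_FIRST(&vpq->pl);
 *
 * vm_page_list_find(), defined near the end of this file, wraps this
 * lookup and falls back to other colors when the preferred list is empty.
 */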

#endif

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define PG_NOSYNC	0x0400		/* do not collect for syncer */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation. Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page])
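
/*
 * Illustrative sketch (not compiled): VM_PAGE_TO_PHYS() and
 * PHYS_TO_VM_PAGE() are inverses for any managed physical page, since
 * vm_page_array[] is indexed by physical page number minus first_page:
 *
 *	vm_page_t m = PHYS_TO_VM_PAGE(pa);
 *	KASSERT(VM_PAGE_TO_PHYS(m) == trunc_page(pa),
 *	    ("page array lookup mismatch"));
 *
 * The assertion form above is only for illustration.
 */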

/*
 *	Functions implemented as macros or inlines
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&(m)->flags, bits);
}

#if 0
static __inline void
vm_page_assert_wait(vm_page_t m, int interruptible)
{
	vm_page_flag_set(m, PG_WANTED);
	assert_wait((int) m, interruptible);
}
#endif

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0, ("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 *	vm_page_flash:
 *
 *	wakeup anyone waiting for the page.
 */

static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 *	vm_page_wakeup:
 *
 *	clear the PG_BUSY flag and wakeup anyone waiting for the
 *	page.
 *
 */

static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

/*
 *	vm_page_io_start/vm_page_io_finish:
 *
 *	Track I/O in progress on the page via the minor busy count
 *	(m->busy); waiters are woken when it drops back to zero.
 */

static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_ZERO		3
#define	VM_ALLOC_RETRY		0x80
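
/*
 * Illustrative sketch (not compiled): a typical allocation checks for
 * failure and blocks for more memory before retrying; VM_ALLOC_RETRY is
 * intended for vm_page_grab(), which can do the waiting itself.  VM_WAIT
 * here is the usual "wait for free pages" macro from <vm/vm_pageout.h>,
 * shown only as an example:
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		VM_WAIT;
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 */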

void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
vm_page_t vm_page_grab __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((register vm_page_t));
void vm_page_dontneed __P((register vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
static __inline void vm_page_free __P((vm_page_t));
static __inline void vm_page_free_zero __P((vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
vm_page_t vm_add_new_page __P((vm_offset_t pa));
void vm_page_unwire __P((vm_page_t, int));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
void vm_page_unqueue_nowakeup __P((vm_page_t));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_dirty __P((vm_page_t, int, int));
void vm_page_clear_dirty __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
int vm_page_bits __P((int, int));
vm_page_t _vm_page_list_find __P((int, int));
#if 0
int vm_page_sleep(vm_page_t m, char *msg, char *busy);
int vm_page_asleep(vm_page_t m, char *msg, char *busy);
#endif
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);

/*
 * Keep a page from being freed by the page daemon.  This has much the
 * same effect as wiring, but with much lower overhead, and should be
 * used only for *very* temporary holding ("wiring").
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

static __inline void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
}
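
/*
 * Illustrative sketch (not compiled): a hold pins the page only across a
 * short critical region, and the caller is expected to drop it promptly:
 *
 *	vm_page_hold(m);
 *	... brief operation that must not lose the page ...
 *	vm_page_unhold(m);
 */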

/*
 * 	vm_page_protect:
 *
 *	Reduce the protection of a page.  This routine never raises the
 *	protection and therefore can be safely called if the page is already
 *	at VM_PROT_NONE (it is effectively a NOP).
 */

static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}
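
/*
 * Illustrative sketch (not compiled): before testing whether a page is
 * dirty, callers commonly write-protect it first so that no new
 * modifications can sneak in behind the check, e.g.:
 *
 *	vm_page_protect(m, VM_PROT_READ);
 *	vm_page_test_dirty(m);
 *	if (m->dirty != 0)
 *		... schedule the page for cleaning ...
 */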

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
static __inline boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
static __inline void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}
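
/*
 * Illustrative sketch (not compiled): the classic use is copying the
 * original page into a freshly allocated one, e.g. when breaking a
 * copy-on-write sharing:
 *
 *	new_m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *	if (new_m != NULL)
 *		vm_page_copy(old_m, new_m);	// new_m->valid becomes VM_PAGE_BITS_ALL
 */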

/*
 *	vm_page_free:
 *
 *	Free a page
 *
 *	The clearing of PG_ZERO is a temporary safety until the code can be
 *	reviewed to determine that PG_ZERO is being properly cleared on
 *	write faults or maps.  PG_ZERO was previously cleared in
 *	vm_page_alloc().
 */
static __inline void
vm_page_free(m)
	vm_page_t m;
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue
 */
static __inline void
vm_page_free_zero(m)
	vm_page_t m;
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 *	vm_page_sleep_busy:
 *
 *	Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
 *	m->busy is zero.  Returns TRUE if it had to sleep (including if
 *	it almost had to sleep and made temporary spl*() mods), FALSE
 *	otherwise.
 *
 *	This routine assumes that interrupts can only remove the busy
 *	status from a page, not set the busy status or change it from
 *	PG_BUSY to m->busy or vice versa (which would create a timing
 *	window).
 *
 *	Note that being an inline, this code will be well optimized.
 */

static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		int s = splvm();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy. Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, PVM, msg, 0);
		}
		splx(s);
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}
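
/*
 * Illustrative sketch (not compiled): callers typically re-check (or
 * re-look up) the page after a sleep, since it may have changed or been
 * freed while they slept, and only then claim it with vm_page_busy()
 * (the wait-message string here is hypothetical):
 *
 *	retry:
 *		m = vm_page_lookup(object, pindex);
 *		if (m != NULL && vm_page_sleep_busy(m, TRUE, "pgwait"))
 *			goto retry;
 *		if (m != NULL)
 *			vm_page_busy(m);
 */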

/*
 *	vm_page_dirty:
 *
 *	make page all dirty
 */

static __inline void
vm_page_dirty(vm_page_t m)
{
#if !defined(KLD_MODULE)
	KASSERT(m->queue - m->pc != PQ_CACHE, ("vm_page_dirty: page in cache!"));
#endif
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */

static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#if !defined(KLD_MODULE)

static __inline vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

#if PQ_L2_SIZE > 1
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	}
	if (m == NULL)
		m = _vm_page_list_find(basequeue, index);
#else
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
	}
#endif
	return(m);
}
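
/*
 * Illustrative sketch (not compiled): picking a free page of a given
 * color, preferring one that has already been pre-zeroed:
 *
 *	m = vm_page_list_find(PQ_FREE, color & PQ_L2_MASK, TRUE);
 *	if (m != NULL && (m->flags & PG_ZERO))
 *		... the page does not need pmap_zero_page() ...
 */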

#endif

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */