/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_page.h 209861 2010-07-09 19:38:30Z alc $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A per-object splay tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O), its corresponding page lock (P),
 *	or by the lock on the page queues (Q).
 *
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (Q) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) 	*/
	struct vm_page *left;		/* splay tree link (O)		*/
	struct vm_page *right;		/* splay tree link (O)		*/

	vm_object_t object;		/* which object am I in (O,P)*/
	vm_pindex_t pindex;		/* offset into object (O,Q) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint8_t	queue;			/* page queue index (P,Q) */
	int8_t segind;			/* vm_phys segment index */
	u_short	flags;			/* see below */
	uint8_t	order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys free pool index */
	u_short cow;			/* page cow mapping count (P) */
	u_int wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count (P) */
	u_short oflags;			/* page flags (O) */
	u_char	act_count;		/* page usage count (P) */
	u_char	busy;			/* page busy count (O) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks (O) */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks (O) */
#elif PAGE_SIZE == 16384
	u_int valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_int dirty;			/* map of dirty DEV_BSIZE chunks (O) */
#elif PAGE_SIZE == 32768
	u_long valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_long dirty;			/* map of dirty DEV_BSIZE chunks (O) */
#endif
};
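
/*
 * Example (an illustrative sketch, not part of this interface): "valid"
 * and "dirty" are bitmaps with one bit per DEV_BSIZE chunk of the page.
 * With PAGE_SIZE == 4096 and DEV_BSIZE == 512 there are eight chunks,
 * so a fully valid page has valid == 0xff (VM_PAGE_BITS_ALL, defined
 * below).  With the object locked, a caller could test the chunk that
 * covers a byte offset "off" (a hypothetical variable) roughly as:
 *
 *	int bit = 1 << (off / DEV_BSIZE);
 *	if ((m->valid & bit) != 0) {
 *		... the DEV_BSIZE chunk containing "off" holds valid data
 *	}
 */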

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 */
#define	VPO_BUSY	0x0001	/* page is in transit */
#define	VPO_WANTED	0x0002	/* someone is waiting for page */
#define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x0400	/* do not collect for syncer */

#define PQ_NONE		0
#define	PQ_INACTIVE	1
#define	PQ_ACTIVE	2
#define	PQ_HOLD		3
#define	PQ_COUNT	4

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

struct vpglocks {
	struct mtx	data;
	char		pad[CACHE_LINE_SIZE - sizeof(struct mtx)];
} __aligned(CACHE_LINE_SIZE);

extern struct vpglocks vm_page_queue_free_lock;
extern struct vpglocks pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	&pa_lock[pa_index((pa)) % PA_LOCK_COUNT].data
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa) 			\
	do {		   			\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr((m)), (a))

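/*
 * Example (an illustrative sketch of common usage, not a contract of
 * this header): the page lock is keyed on the page's physical address,
 * so vm_page_lock(m) resolves to the pa_lock[] entry covering
 * VM_PAGE_TO_PHYS(m).  Callers typically wrap short updates of fields
 * annotated (P) in the structure above, e.g. the hold count:
 *
 *	vm_page_lock(m);
 *	vm_page_hold(m);
 *	vm_page_unlock(m);
 *	... safe to reference the page while it is held ...
 *	vm_page_lock(m);
 *	vm_page_unhold(m);
 *	vm_page_unlock(m);
 */
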
#define	vm_page_queue_free_mtx	vm_page_queue_free_lock.data
/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * 	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 *
 * PG_REFERENCED may be cleared only if the object containing the page is
 * locked.
 *
 * PG_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
 * does so, the page must be VPO_BUSY.
 */
#define	PG_CACHED	0x0001		/* page is cached */
#define	PG_FREE		0x0002		/* page is free */
#define PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_UNMANAGED	0x0800		/* No PV management for page */
#define PG_MARKER	0x1000		/* special queue marker page */
#define	PG_SLAB		0x2000		/* object pointer is actually a slab */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

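/*
 * Example (a paraphrase of how the ACT_* constants are meant to be
 * used by the page daemon; this is not the scanner's actual code):
 * act_count starts at ACT_INIT, is advanced when the page shows
 * references, and decays otherwise, clamped to ACT_MAX:
 *
 *	if (referenced) {
 *		m->act_count += ACT_ADVANCE;
 *		if (m->act_count > ACT_MAX)
 *			m->act_count = ACT_MAX;
 *	} else if (m->act_count > ACT_DECLINE)
 *		m->act_count -= ACT_DECLINE;
 *	else
 *		m->act_count = 0;
 */
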
#ifdef _KERNEL

#include <vm/vm_param.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	cache
 *		Almost available for allocation. Still associated with
 *		an object, but clean and immediately freeable.
 *
 *	hold
 *		Will become free after a pending I/O operation
 *		completes.
 *
 * The following lists are LRU sorted:
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active," i.e., they have been
 *		recently referenced.
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);

static __inline vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

static __inline vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
#ifdef VM_PHYSSEG_SPARSE
	return (vm_phys_paddr_to_vm_page(pa));
#elif defined(VM_PHYSSEG_DENSE)
	return (&vm_page_array[atop(pa) - first_page]);
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

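/*
 * Example (illustrative): under VM_PHYSSEG_DENSE the lookup above is
 * plain array indexing by page frame number, while VM_PHYSSEG_SPARSE
 * defers to vm_phys.  Either way a page and its physical address can
 * be round-tripped; the assertion below is a usage sketch, not code
 * from this file:
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *	KASSERT(PHYS_TO_VM_PAGE(pa) == m, ("vm_page lookup mismatch"));
 */
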
extern struct vpglocks vm_page_queue_lock;

#define	vm_page_queue_mtx	vm_page_queue_lock.data
#define vm_page_lock_queues()   mtx_lock(&vm_page_queue_mtx)
#define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx)

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
#endif

/* page allocation classes: */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
/* page allocation flags: */
#define	VM_ALLOC_WIRED		0x0020	/* non pageable */
#define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
#define	VM_ALLOC_RETRY		0x0080	/* Mandatory with vm_page_grab() */
#define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
#define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
#define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* vm_page_grab() only */

#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)

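/*
 * Example (an illustrative sketch; the surrounding variables are
 * hypothetical): a request to vm_page_alloc() combines exactly one
 * allocation class with any number of flags, e.g. a wired page that is
 * preferably pre-zeroed, for an object the caller has locked:
 *
 *	m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m == NULL)
 *		... wait for free pages or fail ...
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 *
 * VM_ALLOC_ZERO only expresses a preference for a pre-zeroed page, so
 * callers that need zeroed memory still check PG_ZERO as above.
 */
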
void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
void vm_page_dirty(vm_page_t m);
void vm_page_wakeup(vm_page_t m);

void vm_pageq_remove(vm_page_t m);

void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_page_cache_remove(vm_page_t);
void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate (vm_page_t);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
vm_page_t vm_page_prev(vm_page_t m);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_requeue(vm_page_t m);
void vm_page_set_valid(vm_page_t m, int base, int size);
void vm_page_sleep(vm_page_t m, const char *msg);
vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);
void vm_page_cowfault (vm_page_t);
int vm_page_cowsetup(vm_page_t);
void vm_page_cowclear (vm_page_t);

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if VPO_BUSY is set, or,
 *	when also_m_busy is TRUE, if the busy count is non-zero.  Returns
 *	TRUE if the thread slept and the page queues lock was released;
 *	otherwise, retains the page queues lock and returns FALSE.
 *
 *	The object containing the given page must be locked.
 */
static __inline int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{

	if ((m->oflags & VPO_BUSY) || (also_m_busy && m->busy)) {
		vm_page_sleep(m, msg);
		return (TRUE);
	}
	return (FALSE);
}

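/*
 * Example (an illustrative sketch; the retry loop and names are
 * assumptions, not code from this file): because the page can be freed
 * or reused while the thread sleeps, callers normally repeat the
 * lookup after vm_page_sleep_if_busy() returns TRUE:
 *
 *	VM_OBJECT_LOCK(object);
 * retry:
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_sleep_if_busy(m, TRUE, "pgwait"))
 *		goto retry;
 */
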
/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */