/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: stable/11/sys/vm/vm_page.h 327785 2018-01-10 20:39:26Z markj $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized by one of, or a combination of, the lock on the
 *	object that the page belongs to (O), the pool lock for the page
 *	(P), and the lock for either the free or paging queue (Q).  If a
 *	field is annotated below with two of these locks, then holding
 *	either lock is sufficient for read access, but both locks are
 *	required for write access.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
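
/*
 * The width of vm_page_bits_t follows from one bit per DEV_BSIZE chunk
 * in a page (see the valid and dirty fields below): with the usual
 * 512-byte DEV_BSIZE, a 4096-byte page needs 4096 / 512 = 8 bits,
 * hence uint8_t, while a 32768-byte page needs 64 bits, hence
 * uint64_t.
 */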

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t	queue;			/* page queue index (P,Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index */
	uint8_t	order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys freepool index */
	u_char	act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 */
#define	VPO_UNUSED01	0x01		/* --available-- */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, except that support for owner identity has been
 * removed because of size constraints.  Checks on lock recursion are
 * therefore not possible, and the effectiveness of the lock assertions
 * is somewhat reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
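
/*
 * A worked example of the busy_lock encoding: a page with two shared
 * busy holders carries VPB_SHARERS_WORD(2) = (2 << 3) | VPB_BIT_SHARED
 * = 0x11, from which VPB_SHARERS(0x11) = (0x11 & ~0x7) >> 3 = 2
 * recovers the holder count.  VPB_UNBUSIED is thus 0x01: zero sharers
 * with only the shared bit set.
 */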

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_COUNT	3

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	u_int		* const pq_vcnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);

struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	u_int vmd_page_count;
	u_int vmd_free_count;
	long vmd_segs;	/* bitmask of the segments */
	boolean_t vmd_oom;
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_laundry_marker;
	struct vm_page vmd_marker; /* marker for pagedaemon private use */
	struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
};

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#ifdef _KERNEL
static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

#ifdef notyet
	vm_pagequeue_assert_locked(pq);
#endif
	pq->pq_cnt += addend;
	atomic_add_int(pq->pq_vcnt, addend);
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
#endif	/* _KERNEL */
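
/*
 * A typical caller manipulates a page queue with its mutex held and
 * keeps pq_cnt in step, along the lines of this sketch (illustrative
 * only; the canonical users live in vm_page.c and vm_pageout.c):
 *
 *	struct vm_pagequeue *pq;
 *
 *	pq = &vm_dom[0].vmd_pagequeues[PQ_INACTIVE];
 *	vm_pagequeue_lock(pq);
 *	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 *	vm_pagequeue_cnt_dec(pq);
 *	vm_pagequeue_unlock(pq);
 */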

extern struct mtx_padalign vm_page_queue_free_mtx;
extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
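
/*
 * With the default PDRSHIFT of 21, pa_index() maps each 2MB-aligned
 * span of physical memory to one entry of pa_lock[], so physically
 * nearby pages tend to share a lock.  PA_UNLOCK_COND() supports the
 * common pattern of tracking the currently locked address in a
 * variable that is zero whenever no lock is held, e.g.:
 *
 *	vm_paddr_t locked_pa = 0;
 *	...
 *	PA_UNLOCK_COND(locked_pa);
 */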

#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
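
/*
 * For example, a kernel module that wants to record a reference to a
 * page should go through the MI interface rather than touch aflags
 * directly:
 *
 *	vm_page_reference(m);
 *
 * which sets PGA_REFERENCED on the module's behalf.
 */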

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs. The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for the functions vm_page_alloc(),
 * vm_page_grab(), vm_page_alloc_contig() and vm_page_alloc_freelist().
 * Some functions support only a subset of the flags and ignore the
 * others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 are dedicated to flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_IFCACHED	0x0400
#define	VM_ALLOC_IFNOTCACHED	0x0800
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
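
/*
 * For instance, a request for a wired, prezeroed page that also hints
 * that seven more allocations will follow could be written (sketch):
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_COUNT(7));
 *
 * Note that VM_ALLOC_ZERO only requests a page that may already be
 * zeroed; the caller must still check PG_ZERO and zero the page itself
 * when the flag is not set.
 */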

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
#endif
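
/*
 * As an example of the translation above, a malloc(9)-style request of
 * M_NOWAIT | M_ZERO becomes VM_ALLOC_SYSTEM | VM_ALLOC_ZERO |
 * VM_ALLOC_NOWAIT, while M_USE_RESERVE | M_NOWAIT maps to the
 * VM_ALLOC_INTERRUPT class, which may dig deeper into the reserved
 * free pages.
 */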

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4
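
/*
 * For example, a caller that should only operate on fully valid,
 * unbusied superpages might combine the predicates like this (sketch):
 *
 *	if (vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, NULL))
 *		... safe to treat the entire superpage as valid ...
 */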

void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_phys_pglist(struct pglist *tq);
bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
boolean_t vm_page_unwire(vm_page_t m, uint8_t queue);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define	vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__,	\
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
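
/*
 * A typical exclusive-busy sequence, sketched for illustration only:
 * the page is looked up and busied under the object lock, which may
 * then be dropped for the duration of the work (e.g., I/O):
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_lookup(object, pindex);
 *	vm_page_xbusy(m);
 *	VM_OBJECT_WUNLOCK(object);
 *	... operate on the page ...
 *	vm_page_xunbusy(m);
 *
 * Real callers must cope with a page that is already busy, e.g., by
 * sleeping via vm_page_sleep_if_busy() and retrying the lookup.
 */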

#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	/*
	 * The aflags field is the lowest-addressed byte of its 32-bit word,
	 * which on big-endian machines is the most significant byte.
	 */
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	/* As in vm_page_aflag_clear(), shift into the high byte. */
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_remque:
 *
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}

/*
 *	vm_page_undirty:
 *
 *	Set the page to not be dirty.  Note that this does not clear the
 *	pmap's modified bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mold;
	(void)mret;
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (m->queue == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (m->queue == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{

	return (m->queue == PQ_LAUNDRY);
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */