/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * In general, PG_reserved does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage.  These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it is first entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable"  */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages.  Stored in first tail page's
	 * flags word.  Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_error,
	PG_hugetlb = PG_active,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of @page[1], which avoids touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
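
/*
 * Illustrative sketch; the example_ helper below is hypothetical and not
 * part of the kernel API. compound_head() resolves a tail page to its head
 * page and leaves other pages untouched, so state kept only on the head
 * can be tested from any constituent page:
 */
static __always_inline bool example_head_is_locked(struct page *page)
{
	/* Tail pages redirect to the head before PG_locked is tested. */
	return test_bit(PG_locked, &compound_head(page)->flags);
}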

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
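
/*
 * Illustrative sketch (hypothetical helper): page_folio() converts any
 * page to the folio containing it, and &folio->page is the folio's head
 * (first) page; folio_page(folio, n) walks to the nth constituent page.
 */
static __always_inline struct page *example_head_page_of(struct page *page)
{
	struct folio *folio = page_folio(page);

	return &folio->page;	/* differs from @page iff @page is a tail */
}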

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)		\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)		\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }		\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return false; }							\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return false; }							\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
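
/*
 * For illustration, PAGEFLAG(Dirty, dirty, PF_HEAD) below expands to the
 * following accessors (sketch; the poison and policy checks are elided):
 *
 *	bool folio_test_dirty(const struct folio *folio);
 *	void folio_set_dirty(struct folio *folio);
 *	void folio_clear_dirty(struct folio *folio);
 *	int PageDirty(const struct page *page);	  - tests the head page
 *	void SetPageDirty(struct page *page);	  - sets on the head page
 *	void ClearPageDirty(struct page *page);	  - clears on the head page
 */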

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
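
/*
 * Illustrative sketch (hypothetical helper, modelled on the check done by
 * page_cache_async_ra() in mm/readahead.c): PG_readahead aliases
 * PG_reclaim, so the readahead interpretation is only trusted while the
 * folio is not under writeback.
 */
static inline bool example_folio_consume_readahead(struct folio *folio)
{
	return !folio_test_writeback(folio) &&
	       folio_test_clear_readahead(folio);
}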

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(const struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the page->mapping does not exist as such, nor do these
 * flags below.  So in order to avoid testing non-existent bits, please
 * make sure that PageSlab(page) actually evaluates to false before calling
 * the following functions (e.g., PageAnon).  See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Unlike the flags above, this flag is used only in fsdax mode.  It
 * indicates that this page->mapping is shared through reflink.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline int __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}
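
/*
 * Illustrative sketch (hypothetical helper, modelled on folio_raw_mapping()
 * in mm/internal.h): masking off the low PAGE_MAPPING_FLAGS bits recovers
 * the raw pointer, which is an address_space for pagecache folios and an
 * anon_vma (or KSM/movable private data) otherwise.
 */
static inline void *example_folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}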

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held.  For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held.  It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
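
/*
 * Illustrative sketch (hypothetical helper, modelled on folio_unlock() in
 * mm/filemap.c): clearing PG_locked this way reports, in the same atomic
 * operation, whether PG_waiters was set and a wakeup is therefore due.
 */
static inline bool example_unlock_needs_wakeup(struct folio *folio)
{
	/* PG_locked is bit 0, within the 0-6 range this API requires. */
	return folio_xor_flags_has_waiters(folio, 1 << PG_locked);
}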

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
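
/*
 * Illustrative sketch (hypothetical helper): the smp_wmb() inside
 * folio_mark_uptodate() pairs with the smp_rmb() in folio_test_uptodate(),
 * so a writer only needs to fill the folio before marking it.
 */
static inline void example_publish_folio(struct folio *folio)
{
	/* ... stores that bring the folio contents uptodate go here ... */
	folio_mark_uptodate(folio);	/* orders those stores before the bit */
}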

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)	\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
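
/*
 * Worked example (illustrative): set_compound_head() stores the head
 * pointer with bit 0 set, so a head at 0xffff888012345680 is recorded in a
 * tail's compound_head as 0xffff888012345681. PageTail() tests that low
 * bit and _compound_head() strips it; the bit is always free because
 * struct page is at least word-aligned.
 */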

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(const struct page *page);
SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)

/**
 * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
 * @folio: The folio to test.
 *
 * Context: Any context.  Caller should have a reference on the folio to
 * prevent it from being turned into a tail page.
 * Return: True for hugetlbfs folios, false for anon folios or folios
 * belonging to other filesystems.
 */
static inline bool folio_test_hugetlb(const struct folio *folio)
{
	return folio_test_large(folio) &&
		test_bit(PG_hugetlb, const_folio_flags(folio, 1));
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)					\
	((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(const struct page *page)
{
	return page_type_has_type(page->page_type);
}
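
/*
 * Worked example (illustrative): page_type is initialised to -1
 * (0xffffffff). __SetPageBuddy() clears PG_buddy, giving 0xffffff7f, and
 * PageType(page, PG_buddy) then checks
 *	(0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 * i.e. (0xffffff7f & 0xf0000080) == 0xf0000000, which holds. A mapcount
 * underflow such as -2 (0xfffffffe) still has bit 0x80 set, so the same
 * check fails and the page is not mistaken for PageBuddy().
 */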

#define PAGE_TYPE_OPS(uname, lname, fname)				\
static __always_inline int Page##uname(const struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline int folio_test_##fname(const struct folio *folio)\
{									\
	return folio_test_type(folio, PG_##lname);			\
}									\
static __always_inline void __SetPage##uname(struct page *page)		\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);		\
	folio->page.page_type &= ~PG_##lname;				\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type |= PG_##lname;				\
}									\

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free pages,
 * allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require marking the pages PageOffline() again instead of giving them to
 * the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page.  They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_hugetlb		| 1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(const struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(const struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */