Lines Matching refs:page

20 #include <linux/page-flags-layout.h>
23 #include <linux/page-flags.h>
26 #include <asm/page.h>
58 * from MIGRATE_CMA pageblocks and page allocator never
80 get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
111 #define get_pageblock_migratetype(page) \
112 get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
115 get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
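The two macros above read the migratetype bits stored per pageblock. A minimal sketch of how a caller might use them, assuming it already holds a valid struct page; page_in_movable_pageblock() is a hypothetical helper name, not part of the header:

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Illustration only: test whether @page sits in a MIGRATE_MOVABLE pageblock
 * using get_pageblock_migratetype() shown above. */
static inline bool page_in_movable_pageblock(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_MOVABLE;
}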
197 NR_DIRTIED, /* pages dirtied since bootup */
198 NR_WRITTEN, /* pages written since bootup */
337 * a page is on one of lrugen->folios[]. Otherwise it stores 0.
339 * A page is added to the youngest generation on faulting. The aging needs to
340 * check the accessed bit at least twice before handing this page over to the
342 * fault; the second check makes sure this page hasn't been used since then.
349 * PG_active is always cleared while a page is on one of lrugen->folios[] so
350 * that the aging need not worry about it. And it's set again when a page
356 * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
363 * Each generation is divided into multiple tiers. A page accessed N times
364 * through file descriptors is in tier order_base_2(N). A page in the first tier
365 * (N=0,1) is marked by PG_referenced unless it was faulted in through page
366 * tables or read ahead. A page in any other tier (N>1) is marked by
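The tier rule quoted above, tier = order_base_2(N), can be shown with a small sketch; refs_to_tier() is a made-up name for illustration, not the kernel's own helper:

#include <linux/log2.h>

/* Illustration only: map an access count N to its tier as described above.
 * order_base_2() returns 0 for N <= 1, so N = 0,1 land in the first tier. */
static inline unsigned int refs_to_tier(unsigned int n)
{
	return order_base_2(n);
}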
678 * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the
679 * previous page freeing. Used to avoid draining the PCP after an
680 * occasional high-order page freeing.
758 * 900MB. The kernel will set up special mappings (page
759 * table entries on i386) for each page that the kernel needs to
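The highmem comment above concerns pages with no permanent kernel mapping. A hedged sketch of the usual access pattern, assuming a short-lived copy out of the page; copy_from_highmem_page(), buf and len are made up for illustration:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustration only: map a (possibly highmem) page temporarily, copy out of
 * it, then drop the temporary mapping again. */
static void copy_from_highmem_page(struct page *page, void *buf, size_t len)
{
	void *vaddr = kmap_local_page(page);

	memcpy(buf, vaddr, len);
	kunmap_local(vaddr);
}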
894 * by the page allocator and VM scanner to calculate all kinds of watermarks
940 /* Write-intensive fields used from the page allocator */
1100 static inline enum zone_type page_zonenum(const struct page *page)
1102 ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
1103 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
1108 return page_zonenum(&folio->page);
1112 static inline bool is_zone_device_page(const struct page *page)
1114 return page_zonenum(page) == ZONE_DEVICE;
1125 static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
1126 const struct page *b)
1138 static inline bool is_zone_device_page(const struct page *page)
1142 static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
1143 const struct page *b)
1151 return is_zone_device_page(&folio->page);
1154 static inline bool is_zone_movable_page(const struct page *page)
1156 return page_zonenum(page) == ZONE_MOVABLE;
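Taken together, the helpers above let callers branch on the zone a page belongs to. A minimal, hypothetical sketch (describe_page_zone() is not a real kernel function):

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Illustration only: classify @page by zone using the helpers shown above. */
static const char *describe_page_zone(const struct page *page)
{
	if (is_zone_device_page(page))
		return "ZONE_DEVICE";
	if (is_zone_movable_page(page))
		return "ZONE_MOVABLE";
	return page_zonenum(page) == ZONE_NORMAL ? "ZONE_NORMAL" : "other zone";
}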
1235 extern struct page *mem_map;
1252 * Cases not accounted: memory outside kernel control, offline page,
1275 * Memory statistics and page replacement data structures are maintained on a
1295 struct page *node_mem_map;
1304 * Also synchronizes pgdat->first_deferred_pfn during deferred page
1317 unsigned long node_spanned_pages; /* total size of physical page
1359 /* Write-intensive fields used by page reclaim */
1389 /* Fields commonly accessed by the page reclaim scanner */
1800 struct page;
1873 * 1. All mem_map arrays are page-aligned.
1906 static inline struct page *__section_mem_map_addr(struct mem_section *section)
1910 return (struct page *)map;
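__section_mem_map_addr() above decodes the struct page array of one mem_section. A hedged sketch of the classic SPARSEMEM (non-vmemmap) pfn-to-page path it serves; sparse_pfn_to_page() is a made-up name mirroring what __pfn_to_page() does in that configuration:

#include <linux/mmzone.h>

/* Illustration only: the encoded section_mem_map is biased so that adding the
 * absolute pfn yields the right struct page within the section's mem_map. */
static inline struct page *sparse_pfn_to_page(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	return __section_mem_map_addr(ms) + pfn;
}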
1996 * @pfn: the page frame number to check
1998 * Check if there is a valid memory map entry aka struct page for the @pfn.
2000 * there is actual usable memory at that @pfn. The struct page may
2001 * represent a hole or an unusable page frame.
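The comment above draws the key distinction: pfn_valid() only guarantees a struct page exists, not that the frame is usable memory. A hedged usage sketch; lookup_page_for_pfn() is a hypothetical helper name:

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Illustration only: return the memory map entry for @pfn, or NULL if none
 * exists. The returned page may still describe a hole or unusable frame. */
static struct page *lookup_page_for_pfn(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return NULL;

	return pfn_to_page(pfn);
}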