Lines Matching refs:page

18 #include <linux/page-flags-layout.h>
36 * Each physical page in the system has a struct page associated with
37 * it to keep track of whatever it is we are using the page for at the
39 * a page, though if it is a pagecache page, rmap structures can tell us
42 * If you allocate the page using alloc_pages(), you can use some of the
43 * space in struct page for your own purposes. The five words in the main
47 * page->mapping, you must restore it to NULL before freeing the page.
49 * If your page will not be mapped to userspace, you can also use the four
55 * refcount does not cause problems. On receiving the page from
65 * bits of struct page, we align all struct pages to double-word boundaries,
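
A minimal sketch of the borrowing rule above, assuming a caller that owns the page outright after alloc_pages(); my_cookie is a hypothetical piece of caller state, not a kernel symbol:

	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (page) {
		/* Borrow page->mapping for private state... */
		page->mapping = (struct address_space *)my_cookie;
		/* ... use the page ... */
		page->mapping = NULL;	/* must be restored before freeing */
		__free_pages(page, 0);
	}
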
74 struct page {
88 * by the page owner.
97 /* Count page's or folio's mlocks */
101 /* Or, free page */
105 /* See page-flags.h for PAGE_MAPPING_FLAGS */
130 struct { /* Tail pages of compound page */
134 /** @pgmap: Points to the hosting device page map. */
141 * page cache page while the page is migrated to device
149 /** @rcu_head: You can use this to free a page by RCU. */
155 * If the page can be mapped to userspace, encodes the number
156 * of times this page is referenced by a page table.
161 * If the page is neither PageSlab nor mappable to userspace,
162 * the value stored here may help determine what this page
163 * is used for. See page-flags.h for a list of page types
184 * WANT_PAGE_VIRTUAL in asm/page.h
197 * KMSAN metadata for this page:
198 * - shadow page: every bit indicates whether the corresponding
199 * bit of the original page is initialized (0) or not (1);
200 * - origin page: every 4 bytes contain an id of the stack trace
203 struct page *kmsan_shadow;
204 struct page *kmsan_origin;
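
A hedged sketch of what the shadow encoding above implies; this is not a real KMSAN interface (KMSAN reads its metadata through internal helpers), only an illustration that a data byte is fully initialized iff its shadow byte has all bits zero:

	static bool page_byte_initialized(struct page *page, size_t off)
	{
		/* CONFIG_KMSAN only: shadow mirrors the page bit for bit */
		u8 *shadow = page_address(page->kmsan_shadow);

		return shadow[off] == 0;	/* 0 == initialized */
	}
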
211 * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
217 * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
231 * the number of consecutive pages, starting from this page, that all belong to
238 static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
241 return (struct encoded_page *)(flags | (unsigned long)page);
244 static inline unsigned long encoded_page_flags(struct encoded_page *page)
246 return ENCODED_PAGE_BITS & (unsigned long)page;
249 static inline struct page *encoded_page_ptr(struct encoded_page *page)
251 return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page);
260 static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page)
262 return ((unsigned long)page) >> 2;
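
Taken together, the helpers above round-trip a page pointer and its low-bit flags. A brief usage sketch, assuming ENCODED_PAGE_BIT_DELAY_RMAP is one of the bits covered by ENCODED_PAGE_BITS:

	struct encoded_page *ep = encode_page(page, ENCODED_PAGE_BIT_DELAY_RMAP);
	struct page *p = encoded_page_ptr(ep);		/* original pointer back */
	unsigned long fl = encoded_page_flags(ep);	/* the flag bits back */
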
275 * @flags: Identical to the page flags.
278 * @mapping: The file this page belongs to, or refers to the anon_vma for
305 * in the page cache, it is at a file offset which is a multiple of that
307 * at an arbitrary page offset, but its kernel virtual address is aligned
344 /* private: the union with struct page is transitional */
346 struct page page;
360 /* private: the union with struct page is transitional */
362 struct page __page_1;
373 /* private: the union with struct page is transitional */
380 /* private: the union with struct page is transitional */
382 struct page __page_2;
387 static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
408 offsetof(struct page, pg) + sizeof(struct page))
414 offsetof(struct page, pg) + 2 * sizeof(struct page))
422 * struct ptdesc - Memory descriptor for page tables.
423 * @__page_flags: Same as page flags. Powerpc only.
424 * @pt_rcu_head: For freeing page table pages.
425 * @pt_list: List of used page tables. Used for s390 and x86.
426 * @_pt_pad_1: Padding that aliases with page's compound head.
428 * @__page_mapping: Aliases with page->mapping. Unused for page tables.
431 * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
433 * @ptl: Lock for the page table.
434 * @__page_type: Same as page->page_type. Unused for page tables.
435 * @__page_refcount: Same as page refcount.
436 * @pt_memcg_data: Memcg data. Tracked for page tables here.
438 * This struct overlays struct page for now. Do not modify without a good
476 static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
489 static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
492 const struct ptdesc *: (const struct page *)(pt), \
493 struct ptdesc *: (struct page *)(pt)))
500 const struct page *: (const struct ptdesc *)(p), \
501 struct page *: (struct ptdesc *)(p)))
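
Usage sketch for the two _Generic converters above; the static_asserts guarantee the overlay lines up, and _Generic preserves constness across the cast:

	struct ptdesc *pt = page_ptdesc(page);	/* view a page as a ptdesc */
	struct page *p = ptdesc_page(pt);	/* and back: p == page */
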
506 #define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
513 * checked by the VM on the head page. So page_private on the tail pages
514 * should be used for data that's ancillary to the head page (eg attaching
515 * buffer heads to tail pages after attaching buffer heads to the head page)
517 #define page_private(page) ((page)->private)
519 static inline void set_page_private(struct page *page, unsigned long private)
521 page->private = private;
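
Round-trip sketch; per the comment above, PagePrivate is only checked on the head page, so tail-page private should carry only data ancillary to the head. my_cookie is a hypothetical caller value:

	set_page_private(page, (unsigned long)my_cookie);
	/* ... later ... */
	void *cookie = (void *)page_private(page);
	set_page_private(page, 0);	/* clear before the page is freed */
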
538 * containing page->_refcount every time we allocate a fragment.
645 * space that has a special rule for the page-fault handlers (ie a shared
830 atomic_long_t pgtables_bytes; /* size of all page tables */
834 spinlock_t page_table_lock; /* Protects page tables and some
889 * for instance during page table copying for fork().
954 * moving a PROT_NONE mapped page.
997 * page table walkers cleared the corresponding bits.
1037 /* mm_struct list for page table walkers */
1063 * When the bitmap is set, page reclaim knows this mm_struct has been
1065 * walking the page tables of this mm_struct to clear the accessed bit.
1208 * typedef vm_fault_t - Return type for page fault handlers.
1223 * @VM_FAULT_HWPOISON: Hit poisoned small page
1224 * @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded
1227 * @VM_FAULT_NOPAGE: ->fault installed the pte; no page is returned
1228 * @VM_FAULT_LOCKED: ->fault locked the returned page
1230 * @VM_FAULT_FALLBACK: huge page fault failed, fall back to small
1232 * @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
1233 * fsync() to complete (for synchronous page faults
1256 /* Encode hstate index for a hwpoisoned large page */
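
A hedged sketch of a ->fault handler using the codes above; my_fault and my_lookup are hypothetical:

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		struct page *page = my_lookup(vmf->pgoff);	/* hypothetical */

		if (!page)
			return VM_FAULT_SIGBUS;
		get_page(page);
		lock_page(page);
		vmf->page = page;
		return VM_FAULT_LOCKED;	/* handler returns the page locked */
	}
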
1288 struct page **pages;
1291 * If non-NULL, then this is called to resolve page faults
1324 * COW mapping, making sure that an exclusive anon page is
1331 * whether we would allow page faults to retry by specifying these two
1334 * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
1337 * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
1340 * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
1343 * be used. Note that page faults can be allowed to retry multiple times,
1346 * signals before a retry to make sure the continuous page faults can still be
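
The three combinations above, sketched as a dispatch over vmf->flags:

	if (!(vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* (c) one-shot fault: no retry, mmap lock held throughout */
	} else if (!(vmf->flags & FAULT_FLAG_TRIED)) {
		/* (a) first attempt: may drop the lock and return VM_FAULT_RETRY */
	} else {
		/* (b) already retried at least once before */
	}
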
1389 * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
1395 * that region. And so, CMA attempts to migrate the page before pinning, when
1398 * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
1400 * anything that gets a page reference and then touches page data (for example,
1430 /* do get_page on page */
1443 /* check page is hwpoisoned */
1448 * FOLL_LONGTERM indicates that the page will be held for an indefinite
1463 * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
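
A hedged sketch of a long-term pin per the FOLL_PIN/FOLL_LONGTERM description above (pin_user_pages() signature as in recent kernels; addr is a hypothetical user address, and the caller is assumed to hold the mmap read lock):

	struct page *pages[1];
	long n = pin_user_pages(addr, 1, FOLL_WRITE | FOLL_LONGTERM, pages);

	if (n == 1) {
		/* ... read/write the page contents ... */
		unpin_user_page(pages[0]);
	}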