/linux-master/include/linux/
skmsg.h
    229: static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
    252: static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, u32 len, u32 offset)
    257:         get_page(page);
    259:         sg_set_page(sge, page, len, offset);
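Editor's note: the hits above show that sk_msg_page_add() takes its own page reference with get_page() before filling the scatterlist entry. A minimal sketch of a caller, assuming a kernel context; example_msg_add_page() is hypothetical and len is assumed to be at most PAGE_SIZE:

    #include <linux/skmsg.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: copy len bytes into a fresh page and append it
     * to an sk_msg. sk_msg_page_add() grabs its own reference, so we can
     * drop ours once the page is recorded in the message.
     */
    static int example_msg_add_page(struct sk_msg *msg, const void *data, u32 len)
    {
            struct page *page = alloc_page(GFP_KERNEL);

            if (!page)
                    return -ENOMEM;

            memcpy(page_address(page), data, len);
            sk_msg_page_add(msg, page, len, 0);     /* refcount now 2 */
            put_page(page);                         /* msg keeps the other ref */
            return 0;
    }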
skbuff.h
    407: * skb_frag_must_loop - Test if %p is a high memory page
    408: * @p: fragment's page
    410: static inline bool skb_frag_must_loop(struct page *p)
    425: * @p: (temp var) current page
    426: * @p_off: (temp var) offset from start of current page,
    427: *         non-zero only on first page.
    428: * @p_len: (temp var) length in current page,
    429: *         < PAGE_SIZE only on first and last page.
    432: * A fragment can hold a compound page, in which case per-page
    2461: skb_frag_fill_page_desc(skb_frag_t *frag, struct page *page, int off, int size)
    2477: __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, int i, struct page *page, int off, int size)
    2513: struct page *page = netmem_to_page(netmem);
    2526: __skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
    2553: skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
    2570: skb_fill_page_desc_noacc(struct sk_buff *skb, int i, struct page *page, int off, int size)
    2583: skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, int size, unsigned int truesize)
    3431: dev_page_is_reusable(const struct page *page)
    3442: skb_propagate_pfmemalloc(const struct page *page, struct sk_buff *skb)
    3727: skb_can_coalesce(struct sk_buff *skb, int i, const struct page *page, int off)
    [all ...]
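Editor's note: skb_add_rx_frag() (line 2583 above) is the convenience form that both records the fragment and fixes up skb->len, skb->data_len and skb->truesize. A hedged sketch of a driver receive path using it; the function name is hypothetical:

    #include <linux/skbuff.h>

    /* Hypothetical RX path: hand a filled page to an skb as a new
     * fragment. The skb takes over our page reference.
     */
    static void example_rx_add_page(struct sk_buff *skb, struct page *page,
                                    unsigned int len)
    {
            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len,
                            PAGE_SIZE);
    }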
rmap.h
    19: * an anonymous page pointing to this anon_vma needs to be unmapped:
    23: * in mprotect), the mapping field of an anonymous page cannot point
    28: * the anon_vma object itself: we're guaranteed no page can be
    36: * guarantee that the vma of page tables will exist for
    180: * No special request: A mapped anonymous (sub)page is possibly shared between
    185: /* The anonymous (sub)page is exclusive to a single process. */
    197: __folio_rmap_sanity_checks(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level)
    213: VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
    214: VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
    321: __folio_dup_file_rmap(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level)
    357: folio_dup_file_rmap_ptes(struct folio *folio, struct page *page, int nr_pages)
    363: folio_dup_file_rmap_pte(struct folio *folio, struct page *page)
    378: folio_dup_file_rmap_pmd(struct folio *folio, struct page *page)
    388: __folio_try_dup_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *src_vma, enum rmap_level level)
    472: folio_try_dup_anon_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *src_vma)
    479: folio_try_dup_anon_rmap_pte(struct folio *folio, struct page *page, struct vm_area_struct *src_vma)
    508: folio_try_dup_anon_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *src_vma)
    520: __folio_try_share_anon_rmap(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level)
    613: folio_try_share_anon_rmap_pte(struct folio *folio, struct page *page)
    642: folio_try_share_anon_rmap_pmd(struct folio *folio, struct page *page)
    790: page_mkclean(struct page *page)
    [all ...]
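Editor's note: the folio_try_dup_anon_rmap_*() helpers are what the fork() copy path uses when it wants to share an anonymous page; they can refuse when the folio may be GUP-pinned. A hedged sketch of that decision, assuming the caller already holds a folio reference as the real copy path does; the function name is hypothetical:

    #include <linux/rmap.h>

    /* Share the existing anonymous page in the child if possible,
     * otherwise tell the caller to fall back to copying it.
     */
    static int example_share_anon_pte(struct folio *folio, struct page *page,
                                      struct vm_area_struct *src_vma)
    {
            if (folio_try_dup_anon_rmap_pte(folio, page, src_vma))
                    return -EAGAIN; /* maybe pinned: caller must copy */

            return 0;               /* now shared with the child */
    }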
ring_buffer.h
    26: * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
    203:         struct buffer_data_read_page *page);
    207: void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
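Editor's note: a hedged sketch of the reader-page protocol around these declarations, assuming the alloc/read/free signatures in this header; consume() is a hypothetical sink:

    #include <linux/ring_buffer.h>
    #include <linux/err.h>

    static void consume(const void *data, size_t len);  /* hypothetical */

    /* Allocate a swap-out page, let the ring buffer fill it with events
     * from one CPU, look at the data, then hand the page back.
     */
    static int example_drain_cpu(struct trace_buffer *buffer, int cpu)
    {
            struct buffer_data_read_page *bpage;
            int ret;

            bpage = ring_buffer_alloc_read_page(buffer, cpu);
            if (IS_ERR(bpage))
                    return PTR_ERR(bpage);

            ret = ring_buffer_read_page(buffer, bpage, PAGE_SIZE, cpu, 0);
            if (ret >= 0)
                    consume(ring_buffer_read_page_data(bpage), PAGE_SIZE);

            ring_buffer_free_read_page(buffer, cpu, bpage);
            return ret < 0 ? ret : 0;
    }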
pgalloc_tag.h
    3: * page allocation tagging
    27: get_page_tag_ref(struct page *page)
    29:         if (page) {
    30:                 struct page_ext *page_ext = page_ext_get(page);
    43: pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr)
    47:         union codetag_ref *ref = get_page_tag_ref(page);
    56: pgalloc_tag_sub(struct page *page, unsigned int nr)
    68: pgalloc_tag_split(struct page *page, unsigned int nr)
    97: pgalloc_tag_get(struct page *page)
    121: get_page_tag_ref(struct page *page)
    123: pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr)
    125: pgalloc_tag_sub(struct page *page, unsigned int nr)
    126: pgalloc_tag_split(struct page *page, unsigned int nr)
    127: pgalloc_tag_get(struct page *page)
    [all ...]
pagemap.h
    147: * mapping_shrinkable - test if page cache state allows inode reclaim
    148: * @mapping: the page cache mapping
    173: * inodes before there is highmem pressure from the page
    186: * head pointer, which allows non-resident page cache entries
    317: * reduce page locking.
    404: struct address_space *page_mapping(struct page *);
    412: * For folios which are in the page cache, return the mapping that this
    413: * page belongs to. Folios in the swap cache return the mapping of the
    418: * Do not call this for folios which aren't in the page cache or swap cache.
    432: * For folios which are in the page cache ...
    448: page_file_mapping(struct page *page)
    523: attach_page_private(struct page *page, void *data)
    528: detach_page_private(struct page *page)
    900: page_to_pgoff(struct page *page)
    918: page_offset(struct page *page)
    923: page_file_offset(struct page *page)
    1017: trylock_page(struct page *page)
    1062: lock_page(struct page *page)
    1133: wait_on_page_locked(struct page *page)
    1378: struct page *page;
    1391: VM_BUG_ON_PAGE(PageTail(page), page);
    1503: page_mkwrite_check_truncate(struct page *page, struct inode *inode)
    1541: i_blocks_per_page(struct inode *inode, struct page *page)
    [all ...]
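Editor's note: lock_page() can sleep, and the page can be truncated while the caller waits, which is why locked-page users revalidate the mapping afterwards (the pattern page_mkwrite_check_truncate(), above, encapsulates). A minimal sketch; the function name is hypothetical, and folio code would use folio->mapping:

    #include <linux/pagemap.h>

    static int example_operate_locked(struct page *page,
                                      struct address_space *mapping)
    {
            lock_page(page);                /* may sleep */
            if (page->mapping != mapping) {
                    unlock_page(page);
                    return -EAGAIN;         /* truncated: redo the lookup */
            }
            /* ... page is locked and still attached to mapping ... */
            unlock_page(page);
            return 0;
    }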
page_ref.h
    7: #include <linux/page-flags.h>
    29: extern void __page_ref_set(struct page *page, int v);
    30: extern void __page_ref_mod(struct page *page, int v);
    31: extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
    32: extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
    33: extern void __page_ref_mod_unless(struct page *page, int v, int u);
    41: __page_ref_set(struct page *page, int v)
    44: __page_ref_mod(struct page *page, int v)
    47: __page_ref_mod_and_test(struct page *page, int v, int ret)
    50: __page_ref_mod_and_return(struct page *page, int v, int ret)
    53: __page_ref_mod_unless(struct page *page, int v, int u)
    56: __page_ref_freeze(struct page *page, int v, int ret)
    59: __page_ref_unfreeze(struct page *page, int v)
    65: page_ref_count(const struct page *page)
    92: page_count(const struct page *page)
    97: set_page_count(struct page *page, int v)
    113: init_page_count(struct page *page)
    118: page_ref_add(struct page *page, int nr)
    130: page_ref_sub(struct page *page, int nr)
    151: page_ref_inc(struct page *page)
    163: page_ref_dec(struct page *page)
    175: page_ref_sub_and_test(struct page *page, int nr)
    189: page_ref_inc_return(struct page *page)
    203: page_ref_dec_and_test(struct page *page)
    217: page_ref_dec_return(struct page *page)
    231: page_ref_add_unless(struct page *page, int nr, int u)
    311: page_ref_freeze(struct page *page, int count)
    325: page_ref_unfreeze(struct page *page, int count)
    327: VM_BUG_ON_PAGE(page_count(page) != 0, page);
    [all ...]
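Editor's note: a hedged sketch of the freeze/unfreeze pattern (used by migration and splitting): page_ref_freeze() atomically replaces an expected refcount with zero, which blocks get_page_unless_zero() users; unfreeze publishes the count again. The function name is hypothetical:

    #include <linux/page_ref.h>

    static bool example_exclusive_access(struct page *page, int expected)
    {
            if (!page_ref_freeze(page, expected))
                    return false;   /* unexpected extra references */

            /* ... no one can take a new speculative reference here ... */

            page_ref_unfreeze(page, expected);
            return true;
    }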
pageblock-flags.h
    35: /* Huge page sizes are variable */
    66: struct page;
    68: unsigned long get_pfnblock_flags_mask(const struct page *page,
    72: void set_pfnblock_flags_mask(struct page *page,
    79: #define get_pageblock_skip(page) \
    80:         get_pfnblock_flags_mask(page, page_to_pfn(page), \
    82: #define clear_pageblock_skip(page) \
    90: get_pageblock_skip(struct page *page)
    94: clear_pageblock_skip(struct page *page)
    97: set_pageblock_skip(struct page *page)
    [all ...]
page-flags.h
    3: * Macros for manipulating and testing page->flags
    18: * Various page->flags bits:
    20: * PG_reserved is set for special pages. The "struct page" of such a page
    25: * - Pages reserved or allocated early during boot (before the page allocator
    27: *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
    29: *   be given to the page allocator.
    32: * - The zero page(s)
    33: * - Pages not added to the page allocator when onlining a section because
    49: * Consequently, PG_reserved for a page mapped ...
    206: page_fixed_fake_head(const struct page *page)
    232: page_fixed_fake_head(const struct page *page)
    238: page_is_fake_head(const struct page *page)
    243: _compound_head(const struct page *page)
    282: PageTail(const struct page *page)
    287: PageCompound(const struct page *page)
    294: PagePoisoned(const struct page *page)
    302: page_init_poison(struct page *page, size_t size)
    310:         const struct page *page = &folio->page;
    312:         VM_BUG_ON_PGFLAGS(PageTail(page), page);
    319:         struct page *page = &folio->page;
    321:         VM_BUG_ON_PGFLAGS(PageTail(page), page);
    585: PageSwapCache(const struct page *page)
    696: PageMappingFlags(const struct page *page)
    706: PageAnon(const struct page *page)
    717: __PageMovable(const struct page *page)
    736: PageKsm(const struct page *page)
    792: PageUptodate(const struct page *page)
    814: __SetPageUptodate(struct page *page)
    819: SetPageUptodate(struct page *page)
    839: PageHead(const struct page *page)
    860: set_compound_head(struct page *page, struct page *head)
    865: clear_compound_head(struct page *page)
    871: ClearPageCompound(struct page *page)
    892: PageTransHuge(const struct page *page)
    894:         VM_BUG_ON_PAGE(PageTail(page), page);
    903: PageTransCompound(const struct page *page)
    913: PageTransTail(const struct page *page)
    967: page_has_type(const struct page *page)
    1059: PageSlab(const struct page *page)
    1078: PageHuge(const struct page *page)
    1088: is_page_hwpoison(const struct page *page)
    1102: PageAnonExclusive(const struct page *page)
    1114: SetPageAnonExclusive(struct page *page)
    1117:         VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
    1121: ClearPageAnonExclusive(struct page *page)
    1124:         VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
    1128: __ClearPageAnonExclusive(struct page *page)
    1131:         VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
    1180: page_has_private(const struct page *page)
    [all ...]
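Editor's note: __SetPageUptodate() and SetPageUptodate() (lines 814 and 819 above) differ in ordering guarantees: the non-atomic form is only safe while the page is invisible to others, while SetPageUptodate() includes the write barrier needed once other contexts may observe the page. A hedged sketch, assuming a lowmem (or kmapped) page and len <= PAGE_SIZE; the function name is hypothetical:

    #include <linux/page-flags.h>
    #include <linux/mm.h>

    static void example_fill_and_publish(struct page *page,
                                         const void *src, size_t len)
    {
            memcpy(page_address(page), src, len);
            SetPageUptodate(page);  /* orders the copy before the flag */
    }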
page_ext.h
    46: * A page_ext page is associated with every page descriptor. The
    47: * page_ext helps us add more information about the page.
    80: extern struct page_ext *page_ext_get(const struct page *page);
    120: static inline struct page_ext *page_ext_get(const struct page *page)
page-isolation.h
    10: is_migrate_isolate_page(struct page *page)
    12:         return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
    23: is_migrate_isolate_page(struct page *page)
    36: void set_pageblock_migratetype(struct page *page, int migratetype);
    38: bool move_freepages_block_isolate(struct zone *zone, struct page *page,
page_idle.h
    6: #include <linux/page-flags.h>
    11: * If there is not enough space to store Idle and Young bits in page flags, use
    12: * page ext flags instead.
    16, 30, 41, 55, 69, 80:         struct page_ext *page_ext = page_ext_get(&folio->page);
mmzone.h
    20: #include <linux/page-flags-layout.h>
    23: #include <linux/page-flags.h>
    26: #include <asm/page.h>
    58: * from MIGRATE_CMA pageblocks and page allocator never
    80:         get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
    111: #define get_pageblock_migratetype(page) \
    112:         get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
    115:         get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
    197: NR_DIRTIED,  /* page dirtyings since bootup */
    1100: page_zonenum(const struct page *page)
    1112: is_zone_device_page(const struct page *page)
    1138: is_zone_device_page(const struct page *page)
    1154: is_zone_movable_page(const struct page *page)
    [all ...]
net.h
    151: struct page;
    316: * either a VM_BUG directly, or __page_cache_release a page that
    320: static inline bool sendpage_ok(struct page *page)
    322:         return !PageSlab(page) && page_count(page) >= 1;
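Editor's note: sendpage_ok() rejects slab pages and pages with a zero refcount, which must be copied rather than spliced by reference into the transmit path. A hedged sketch mirroring the pattern of in-tree zero-copy callers; the function itself is hypothetical:

    #include <linux/net.h>
    #include <linux/bvec.h>
    #include <linux/uio.h>
    #include <linux/socket.h>

    static int example_send_page(struct socket *sock, struct page *page,
                                 int offset, size_t len)
    {
            struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };
            struct bio_vec bvec;

            if (!sendpage_ok(page))
                    msg.msg_flags &= ~MSG_SPLICE_PAGES;  /* force a copy */

            bvec_set_page(&bvec, page, len, offset);
            iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
            return sock_sendmsg(sock, &msg);
    }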
mm_types.h
    18: #include <linux/page-flags-layout.h>
    36: * Each physical page in the system has a struct page associated with
    37: * it to keep track of whatever it is we are using the page for at the
    39: * a page, though if it is a pagecache page, rmap structures can tell us
    42: * If you allocate the page using alloc_pages(), you can use some of the
    43: * space in struct page for your own purposes. The five words in the main
    47: * page->mapping, you must restore it to NULL before freeing the page
    74: struct page {
    238: encode_page(struct page *page, unsigned long flags)
    244: encoded_page_flags(struct encoded_page *page)
    249: encoded_page_ptr(struct encoded_page *page)
    260: encoded_nr_pages(struct encoded_page *page)
    346:         struct page page;  (member of a union in struct folio)
    519: set_page_private(struct page *page, unsigned long private)
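Editor's note: the encoded_page accessors above pack flag bits into the low, alignment-guaranteed bits of a struct page pointer. A hedged round-trip sketch; ENCODED_PAGE_BIT_DELAY_RMAP is assumed from the mmu_gather batching code, and the function name is hypothetical:

    #include <linux/mm_types.h>
    #include <linux/bug.h>

    static void example_encode(struct page *page)
    {
            struct encoded_page *ep =
                    encode_page(page, ENCODED_PAGE_BIT_DELAY_RMAP);

            WARN_ON(encoded_page_ptr(ep) != page);
            WARN_ON(!(encoded_page_flags(ep) & ENCODED_PAGE_BIT_DELAY_RMAP));
    }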
mm.h
    25: #include <linux/page-flags.h>
    100: #include <asm/page.h>
    117: * a zero page mapping on a read fault.
    120: * related to the physical page in case of virtualization.
    133: /* This function must be updated when the size of struct page grows above 96
    140: static inline void __mm_zero_struct_page(struct page *page)
    142:         unsigned long *_pp = (void *)page;
    144:         /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
    145:         BUILD_BUG_ON(sizeof(struct page) ...
    544:         struct page *page;  /* ->fault handlers should return a ...  (in struct vm_fault)
    1090: compound_order(struct page *page)
    1133: put_page_testzero(struct page *page)
    1135:         VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
    1150: get_page_unless_zero(struct page *page)
    1155: folio_get_nontail_page(struct page *page)
    1214: page_mapcount_reset(struct page *page)
    1230: page_mapcount(struct page *page)
    1299: page_mapped(const struct page *page)
    1306, 1313:         struct page *page = virt_to_page(x);
    1328: page_size(struct page *page)
    1334: page_shift(struct page *page)
    1343: thp_order(struct page *page)
    1345:         VM_BUG_ON_PGFLAGS(PageTail(page), page);
    1355: thp_size(struct page *page)
    1478: get_page(struct page *page)
    1483: try_get_page(struct page *page)
    1570: put_page(struct page *page)
    1654: page_zone_id(struct page *page)
    1662: page_to_nid(const struct page *page)
    1732, 1744, 1809: page_cpupid_reset_last(struct page *page)
    1831: page_kasan_tag(const struct page *page)
    1843: page_kasan_tag_set(struct page *page, u8 tag)
    1859: page_kasan_tag_reset(struct page *page)
    1867: page_kasan_tag(const struct page *page)
    1872: page_kasan_tag_set(struct page *page, u8 tag)
    1873: page_kasan_tag_reset(struct page *page)
    1877: page_zone(const struct page *page)
    1882: page_pgdat(const struct page *page)
    1898: set_page_section(struct page *page, unsigned long section)
    1904: page_to_section(const struct page *page)
    1971: page_maybe_dma_pinned(struct page *page)
    1999: is_zero_page(const struct page *page)
    2044: set_page_zone(struct page *page, enum zone_type zone)
    2050: set_page_node(struct page *page, unsigned long node)
    2056: set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn)
    2095: compound_nr(struct page *page)
    2112: thp_nr_pages(struct page *page)
    2228: arch_make_page_accessible(struct page *page)
    2260: page_address(const struct page *page)
    2264: set_page_address(struct page *page, void *address)
    2277: lowmem_page_address(const struct page *page)
    2299: page_index(struct page *page)
    2311: page_is_pfmemalloc(const struct page *page)
    2340: set_page_pfmemalloc(struct page *page)
    2345: clear_page_pfmemalloc(struct page *page)
    2518:         struct page *page;
    2903:         struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
    2918:         struct page *page = ptdesc_page(pt);
    3175: free_reserved_page(struct page *page)
    3192: mark_page_reserved(struct page *page)
    3637: vmf_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
    3748: kernel_poison_pages(struct page *page, int numpages)
    3753: kernel_unpoison_pages(struct page *page, int numpages)
    3761: __kernel_poison_pages(struct page *page, int numpages)
    3762: kernel_poison_pages(struct page *page, int numpages)
    3763: kernel_unpoison_pages(struct page *page, int numpages)
    3816: debug_pagealloc_map_pages(struct page *page, int numpages)
    3822: debug_pagealloc_unmap_pages(struct page *page, int numpages)
    3841: page_is_guard(struct page *page)
    3850: set_page_guard(struct zone *zone, struct page *page, unsigned int order)
    3859: clear_page_guard(struct zone *zone, struct page *page, unsigned int order)
    3868: debug_pagealloc_map_pages(struct page *page, int numpages)
    3869: debug_pagealloc_unmap_pages(struct page *page, int numpages)
    3872: page_is_guard(struct page *page)
    3873: set_page_guard(struct zone *zone, struct page *page, unsigned int order)
    3875: clear_page_guard(struct zone *zone, struct page *page, unsigned int order)
    [all ...]
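Editor's note: get_page() (line 1478 above) requires an already-elevated refcount; speculative lookups must go through get_page_unless_zero() (line 1150) and bail if the page was concurrently freed. A minimal sketch; the function name is hypothetical:

    #include <linux/mm.h>

    static struct page *example_try_grab(struct page *page)
    {
            if (!get_page_unless_zero(page))
                    return NULL;    /* lost the race with free */
            return page;            /* caller pairs this with put_page() */
    }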
memcontrol.h
    24: #include <linux/page-flags.h>
    29: struct page;
    33: /* Cgroup-specific page state, on top of universal node page state */
    179: * page cache and RSS per cgroup. We would eventually like to provide
    335: /* page->memcg_data is a pointer to a slabobj_ext vector */
    337: /* page has been accounted as a non-slab kernel page */
    446: static inline struct mem_cgroup *page_memcg(struct page *page)
    522: page_memcg_check(struct page *page)
    568: PageMemcgKmem(struct page *page)
    1008: mod_memcg_page_state(struct page *page, enum memcg_stat_item idx, int val)
    1136: page_memcg(struct page *page)
    1152: page_memcg_check(struct page *page)
    1167: PageMemcgKmem(struct page *page)
    1506: mod_memcg_page_state(struct page *page, enum memcg_stat_item idx, int val)
    1539, 1547:         struct page *page = virt_to_head_page(p);
    1807: memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
    1815: memcg_kmem_uncharge_page(struct page *page, int order)
    1853: memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
    1859: memcg_kmem_uncharge_page(struct page *page, int order)
    1863: __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
    1869: __memcg_kmem_uncharge_page(struct page *page, int order)
    [all ...]
kprobes.h
    296:         void *(*alloc)(void);   /* allocate insn page */
    297:         void (*free)(void *);   /* free insn page */
    440: void free_optinsn_page(void *page);
ksm.h
    69: * When do_swap_page() first faults in from swap what used to be a KSM page,
    73: * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
    77: * but what if the vma was unmerged while the page was swapped out?
    84: void collect_procs_ksm(struct folio *folio, struct page *page,
    117: static inline void collect_procs_ksm(struct folio *folio, struct page *page, ...)
hugetlb.h
    38: * For HugeTLB page, there are more metadata to save in the struct page. But
    39: * the head struct page cannot meet our needs, so we have to abuse other tail
    40: * struct page to store the metadata.
    83: * instantiated within the map. The from and to elements are huge page
    136: struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
    140:         unsigned long, unsigned long, struct page *,
    145:         struct page *ref_page, zap_flags_t zap_flags);
    217: * high-level pgtable page, but also PUD entry that can be unshared
    227: * pgtable page can ...
    [all ...]
io.h
    15: #include <asm/page.h>
    125: * no effect if the per-page mechanisms are functional.
huge_mm.h
    154: *  - For file vma, check if the linear page offset of vma is
    302: int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
    304: static inline int split_huge_page(struct page *page)
    306:         return split_huge_page_to_list_to_order(page, NULL, 0);
    379: struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
    405: #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
    457: split_huge_page_to_list_to_order(struct page *page, ...)
    462: split_huge_page(struct page *page)
    [all ...]
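Editor's note: split_huge_page() (line 304 above) is a wrapper around split_huge_page_to_list_to_order() with new_order 0; callers must hold the page lock and a reference, and 0 means the compound page was split into base pages. A hedged sketch; the function name is hypothetical:

    #include <linux/huge_mm.h>
    #include <linux/pagemap.h>

    static int example_try_split(struct page *page)
    {
            int ret;

            if (!trylock_page(page))
                    return -EAGAIN;
            ret = split_huge_page(page);    /* 0 on success */
            unlock_page(page);
            return ret;
    }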
gfp.h
    144: * There is only one page-allocator function, and two main namespaces to
    145: * it. The alloc_page*() variants return 'struct page *' and as such
    146: * can allocate highmem pages, the *get*page*() variants return
    147: * virtual kernel addresses to the allocated page(s).
    174: static inline void arch_free_page(struct page *page, int order) { }
    177: static inline void arch_alloc_page(struct page *page, int order) { }
    180: struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
    191:         struct page **page_array);
    [all ...]
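Editor's note: a minimal sketch of the two namespaces the comment above draws: the alloc_page*() family hands back struct page (highmem-capable), while the __get_free_page*() family hands back kernel virtual addresses and therefore cannot use highmem. The function name is hypothetical:

    #include <linux/gfp.h>

    static void example_two_namespaces(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, 1); /* 2 pages */
            unsigned long addr = __get_free_page(GFP_KERNEL);

            if (page)
                    __free_pages(page, 1);
            if (addr)
                    free_page(addr);
    }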
fs.h
    276: * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
    277: *                          completed, that the page is still locked, and
    279: *                          to return the page to the active list -- it won't
    282: *                          the page if they get this return. Returned by
    285: * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
    286: *                      unlocked it and the page might have been truncated.
    287: *                      The caller should back up to acquiring a new page and
    289: *                      precautions not to livelock. If the caller held a page
    295: * page to allow for functions that return the number of bytes operated on in a
    296: * given page
    [all ...]
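Editor's note: a hedged sketch of honouring AOP_TRUNCATED_PAGE as the comment above describes: the aop returned with the page unlocked and possibly truncated, so drop it, look the page up again and retry. my_aop() is a hypothetical stand-in for whichever address_space operation was invoked:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static int my_aop(struct page *page);       /* hypothetical */

    static int example_call_aop(struct address_space *mapping, pgoff_t index)
    {
            struct page *page;
            int err;

            do {
                    page = find_or_create_page(mapping, index, GFP_KERNEL);
                    if (!page)
                            return -ENOMEM;
                    err = my_aop(page);
                    if (err == AOP_TRUNCATED_PAGE)
                            put_page(page); /* the aop already unlocked it */
            } while (err == AOP_TRUNCATED_PAGE);

            /* on other returns the caller still holds the locked page */
            return err;
    }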
dma-map-ops.h
    32:         struct page *(*alloc_pages_op)(struct device *dev, size_t size,
    35:         void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
    49:         dma_addr_t (*map_page)(struct device *dev, struct page *page,
    128: struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
    130: bool dma_release_from_contiguous(struct device *dev, struct page *pages,
    132: struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
    133: void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
    150: static inline struct page *dma_alloc_from_contiguous(...)
    166: dma_free_contiguous(struct device *dev, struct page *page, size_t size)
    398: arch_dma_prep_coherent(struct page *page, size_t size)
    [all ...]
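Editor's note: a hedged sketch around dma_alloc_contiguous()/dma_free_contiguous() as declared above. These are DMA-core internals (CMA-backed when available), so this is illustrative rather than driver-facing API, and the function names are hypothetical:

    #include <linux/dma-map-ops.h>

    static struct page *example_grab_contiguous(struct device *dev, size_t size)
    {
            struct page *page = dma_alloc_contiguous(dev, size, GFP_KERNEL);

            /* NULL means: fall back to the regular page allocator */
            return page;
    }

    static void example_release_contiguous(struct device *dev,
                                           struct page *page, size_t size)
    {
            dma_free_contiguous(dev, page, size);
    }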