Searched refs:folio (Results 1 - 25 of 361) sorted by relevance

/linux-master/tools/virtio/linux/
mm_types.h
1 struct folio { struct
/linux-master/include/linux/
page_idle.h
16 static inline bool folio_test_young(struct folio *folio) argument
18 struct page_ext *page_ext = page_ext_get(&folio->page);
30 static inline void folio_set_young(struct folio *folio) argument
32 struct page_ext *page_ext = page_ext_get(&folio->page);
41 static inline bool folio_test_clear_young(struct folio *folio) argument
43 struct page_ext *page_ext = page_ext_get(&folio->page);
55 static inline bool folio_test_idle(struct folio *folio) argument
69 folio_set_idle(struct folio *folio) argument
80 folio_clear_idle(struct folio *folio) argument
94 folio_test_young(struct folio *folio) argument
99 folio_set_young(struct folio *folio) argument
103 folio_test_clear_young(struct folio *folio) argument
108 folio_test_idle(struct folio *folio) argument
113 folio_set_idle(struct folio *folio) argument
117 folio_clear_idle(struct folio *folio) argument
[all...]
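The CONFIG_PAGE_IDLE_FLAG half of this header stores the young/idle bits in page_ext; the stubs below it compile the tracking away. A minimal sketch of how a scanner might combine these helpers, assuming idle tracking is enabled (the function name is hypothetical):

#include <linux/mm.h>
#include <linux/page_idle.h>

/* Hypothetical cold-memory probe: "young" means referenced since the
 * last scan, so a folio that was not young is flagged idle and
 * re-tested on the next pass. */
static bool sketch_mark_idle_if_cold(struct folio *folio)
{
	if (folio_test_clear_young(folio))
		return false;		/* recently referenced */
	folio_set_idle(folio);		/* candidate for "cold" next scan */
	return true;
}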
secretmem.h
9 static inline bool folio_is_secretmem(struct folio *folio) argument
19 if (folio_test_large(folio))
23 ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS);
25 if (!mapping || mapping != folio->mapping)
41 static inline bool folio_is_secretmem(struct folio *folio) argument
hugetlb_cgroup.h
61 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd) argument
63 VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
65 return folio->_hugetlb_cgroup_rsvd;
67 return folio->_hugetlb_cgroup;
70 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio) argument
72 return __hugetlb_cgroup_from_folio(folio, false);
76 hugetlb_cgroup_from_folio_rsvd(struct folio *folio) argument
81 __set_hugetlb_cgroup(struct folio *folio, struct hugetlb_cgroup *h_cg, bool rsvd) argument
91 set_hugetlb_cgroup(struct folio *folio, struct hugetlb_cgroup *h_cg) argument
97 set_hugetlb_cgroup_rsvd(struct folio *folio, struct hugetlb_cgroup *h_cg) argument
167 hugetlb_cgroup_from_folio(struct folio *folio) argument
173 hugetlb_cgroup_from_folio_rsvd(struct folio *folio) argument
178 set_hugetlb_cgroup(struct folio *folio, struct hugetlb_cgroup *h_cg) argument
183 set_hugetlb_cgroup_rsvd(struct folio *folio, struct hugetlb_cgroup *h_cg) argument
220 hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct folio *folio) argument
227 hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct folio *folio) argument
233 hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages, struct folio *folio) argument
238 hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages, struct folio *folio) argument
[all...]
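Both the real accessors and the !CONFIG_CGROUP_HUGETLB stubs pair a folio-to-cgroup getter with a setter, each having an _rsvd twin for the reservation charge. A hypothetical round-trip, assuming CONFIG_CGROUP_HUGETLB and a hugetlb folio (the function name is a stand-in):

#include <linux/bug.h>
#include <linux/hugetlb_cgroup.h>

/* Hypothetical: commit a charge to a hugetlb folio, then read it
 * back. The _rsvd variants track the reservation charge instead. */
static void sketch_tag_and_check(struct folio *folio, struct hugetlb_cgroup *h_cg)
{
	set_hugetlb_cgroup(folio, h_cg);
	WARN_ON(hugetlb_cgroup_from_folio(folio) != h_cg);
}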
cacheflush.h
7 struct folio;
11 void flush_dcache_folio(struct folio *folio);
14 static inline void flush_dcache_folio(struct folio *folio) argument
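flush_dcache_folio() is only a real operation on architectures with aliasing data caches; elsewhere it is the empty stub shown. A sketch of the usual call site, assuming a lowmem folio so folio_address() is valid (names are hypothetical):

#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical: after writing through the kernel mapping, flush so
 * user-space aliases observe the new data (a no-op on e.g. x86). */
static void sketch_fill_folio(struct folio *folio, const void *src, size_t len)
{
	memcpy(folio_address(folio), src, len);
	flush_dcache_folio(folio);
}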
zswap.h
30 bool zswap_store(struct folio *folio);
31 bool zswap_load(struct folio *folio);
37 void zswap_folio_swapin(struct folio *folio);
43 static inline bool zswap_store(struct folio *folio) argument
48 static inline bool zswap_load(struct folio *folio) argument
61 zswap_folio_swapin(struct folio *folio) argument
[all...]
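zswap_store() and zswap_load() are the hooks the swap I/O path consults before touching the backing device; the stubs make the calls vanish when zswap is compiled out. A simplified, hypothetical sketch of the store-side decision (the real logic lives in mm/page_io.c and also manages writeback state):

#include <linux/mm.h>
#include <linux/zswap.h>

/* Hypothetical swap-out step: if zswap keeps a compressed copy in
 * RAM, the folio is done and no block I/O needs to be issued. */
static bool sketch_try_zswap_out(struct folio *folio)
{
	if (zswap_store(folio)) {
		folio_unlock(folio);
		return true;
	}
	return false;	/* caller falls back to the swap device */
}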
rmap.h
174 struct anon_vma *folio_get_anon_vma(struct folio *folio);
197 static inline void __folio_rmap_sanity_checks(struct folio *folio, argument
201 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); local
213 VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); local
214 VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
225 VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); local
264 hugetlb_try_dup_anon_rmap(struct folio *folio, struct vm_area_struct *vma) argument
280 hugetlb_try_share_anon_rmap(struct folio *folio) argument
303 hugetlb_add_file_rmap(struct folio *folio) argument
306 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); local
311 hugetlb_remove_rmap(struct folio *folio) argument
318 __folio_dup_file_rmap(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level) argument
345 folio_dup_file_rmap_ptes(struct folio *folio, struct page *page, int nr_pages) argument
362 folio_dup_file_rmap_pmd(struct folio *folio, struct page *page) argument
372 __folio_try_dup_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *src_vma, enum rmap_level level) argument
445 folio_try_dup_anon_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *src_vma) argument
476 folio_try_dup_anon_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *src_vma) argument
488 __folio_try_share_anon_rmap(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level) argument
581 folio_try_share_anon_rmap_pte(struct folio *folio, struct page *page) argument
610 folio_try_share_anon_rmap_pmd(struct folio *folio, struct page *page) argument
740 folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) argument
748 try_to_unmap(struct folio *folio, enum ttu_flags flags) argument
752 folio_mkclean(struct folio *folio) argument
[all...]
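folio_referenced() is the reverse-map workhorse here: it visits every mapping of the folio, test-and-clears the accessed bits, and returns how many references it found. A hedged, reclaim-flavoured sketch (names are hypothetical):

#include <linux/rmap.h>

/* Hypothetical: was this locked folio touched since the last scan?
 * vm_flags reports properties of the referencing VMAs (e.g. VM_EXEC),
 * which reclaim uses to bias its decision. */
static bool sketch_recently_used(struct folio *folio, struct mem_cgroup *memcg)
{
	unsigned long vm_flags;

	return folio_referenced(folio, 1 /* is_locked */, memcg, &vm_flags) > 0;
}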
migrate.h
10 typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
11 typedef void free_folio_t(struct folio *folio, unsigned long private);
66 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
67 struct folio *src, enum migrate_mode mode, int extra_count);
68 int migrate_folio(struct address_space *mapping, struct folio *dst,
69 struct folio *src, enum migrate_mode mode);
73 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
121 folio_test_movable(struct folio *folio) argument
127 folio_movable_ops(struct folio *folio) argument
148 migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) argument
[all...]
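The new_folio_t/free_folio_t typedefs at the top are the callback pair migrate_pages() uses to obtain and dispose of destination folios. A minimal allocation callback matching new_folio_t, assuming a GFP_KERNEL context (the name is hypothetical):

#include <linux/gfp.h>
#include <linux/migrate.h>

/* Hypothetical new_folio_t: allocate a destination of the same order
 * as the source; 'private' is an opaque cookie from the caller and
 * is ignored here. */
static struct folio *sketch_alloc_dst(struct folio *src, unsigned long private)
{
	return folio_alloc(GFP_KERNEL, folio_order(src));
}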
page_ref.h
71 * folio_ref_count - The reference count on this folio.
72 * @folio: The folio.
76 * folio refcount:
85 * Return: The number of references to this folio.
87 static inline int folio_ref_count(const struct folio *folio) argument
89 return page_ref_count(&folio->page);
104 static inline void folio_set_count(struct folio *folio, int v) argument
125 folio_ref_add(struct folio *folio, int nr) argument
137 folio_ref_sub(struct folio *folio, int nr) argument
151 folio_ref_sub_return(struct folio *folio, int nr) argument
163 folio_ref_inc(struct folio *folio) argument
175 folio_ref_dec(struct folio *folio) argument
189 folio_ref_sub_and_test(struct folio *folio, int nr) argument
203 folio_ref_inc_return(struct folio *folio) argument
217 folio_ref_dec_and_test(struct folio *folio) argument
231 folio_ref_dec_return(struct folio *folio) argument
245 folio_ref_add_unless(struct folio *folio, int nr, int u) argument
261 folio_try_get(struct folio *folio) argument
266 folio_ref_try_add_rcu(struct folio *folio, int count) argument
277 VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio); local
311 folio_try_get_rcu(struct folio *folio) argument
325 folio_ref_freeze(struct folio *folio, int count) argument
340 folio_ref_unfreeze(struct folio *folio, int count) argument
[all...]
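The heart of this header is the refcount discipline: folio_try_get() raises the count only if it has not already reached zero, which is what makes lockless lookups safe. A small hypothetical sketch:

#include <linux/mm.h>
#include <linux/page_ref.h>

/* Hypothetical: pin a folio found by a lockless lookup, inspect it,
 * then drop the pin. folio_try_get() fails on a folio that is
 * already being freed. */
static bool sketch_pin_and_inspect(struct folio *folio)
{
	if (!folio_try_get(folio))
		return false;

	pr_debug("folio refcount now %d\n", folio_ref_count(folio));
	folio_put(folio);
	return true;
}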
mpage.h
19 int mpage_read_folio(struct folio *folio, get_block_t get_block);
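mpage_read_folio() is the generic multi-block reader; a block filesystem typically wires it into its address_space operations together with its own get_block_t. A hypothetical sketch (the myfs_* names and the 1:1 block layout are stand-ins):

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mpage.h>

/* Hypothetical get_block_t: map file block iblock 1:1 onto disk. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, myfs_get_block);
}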
/linux-master/mm/
swap.c
60 * The following folio batches are grouped together because they are protected
77 static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp, argument
80 if (folio_test_lru(folio)) {
81 folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
82 lruvec_del_folio(*lruvecp, folio);
83 __folio_clear_lru_flags(folio);
92 if (unlikely(folio_test_mlocked(folio))) {
93 long nr_pages = folio_nr_pages(folio);
95 __folio_clear_mlocked(folio);
105 page_cache_release(struct folio *folio) argument
115 __folio_put_small(struct folio *folio) argument
122 __folio_put_large(struct folio *folio) argument
135 __folio_put(struct folio *folio) argument
155 struct folio *folio, *next; local
179 lru_add_fn(struct lruvec *lruvec, struct folio *folio) argument
184 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); local
226 struct folio *folio = fbatch->folios[i]; local
243 folio_batch_add_and_move(struct folio_batch *fbatch, struct folio *folio, move_fn_t move_fn) argument
252 lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio) argument
269 folio_rotate_reclaimable(struct folio *folio) argument
336 lru_note_cost_refault(struct folio *folio) argument
342 folio_activate_fn(struct lruvec *lruvec, struct folio *folio) argument
367 folio_activate(struct folio *folio) argument
386 folio_activate(struct folio *folio) argument
399 __lru_cache_activate_folio(struct folio *folio) argument
430 folio_inc_refs(struct folio *folio) argument
458 folio_inc_refs(struct folio *folio) argument
473 folio_mark_accessed(struct folio *folio) argument
516 folio_add_lru(struct folio *folio) argument
522 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); local
545 folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma) argument
547 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); local
576 lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio) argument
617 lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio) argument
633 lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio) argument
705 deactivate_file_folio(struct folio *folio) argument
728 folio_deactivate(struct folio *folio) argument
749 folio_mark_lazyfree(struct folio *folio) argument
985 struct folio *folio = folios->folios[i]; local
1060 struct folio *folio = page_folio(encoded_page_ptr(encoded[i])); local
1112 struct folio *folio = fbatch->folios[i]; local
[all...]
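Most of swap.c is machinery behind two small entry points: folio_add_lru()/folio_add_lru_vma() publish a folio onto the per-CPU LRU batches, and folio_mark_accessed() records a reference, promoting inactive folios to active (or bumping the access tier via folio_inc_refs() under MGLRU). A hedged sketch of the caller-side ordering (names are hypothetical):

#include <linux/mm.h>
#include <linux/swap.h>

/* Hypothetical: a freshly allocated anonymous folio is first
 * published to the LRU; later touches feed the aging heuristics. */
static void sketch_publish_anon(struct folio *folio, struct vm_area_struct *vma)
{
	folio_add_lru_vma(folio, vma);	/* batched LRU insertion */
	/* ... later, on a subsequent access ... */
	folio_mark_accessed(folio);
}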
page_idle.c
34 static struct folio *page_idle_get_folio(unsigned long pfn)
37 struct folio *folio; local
42 folio = page_folio(page);
43 if (!folio_test_lru(folio) || !folio_try_get(folio))
45 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
46 folio_put(folio);
47 folio = NULL;
52 page_idle_clear_pte_refs_one(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg) argument
89 page_idle_clear_pte_refs(struct folio *folio) argument
119 struct folio *folio; local
164 struct folio *folio; local
[all...]
swap.h
13 void swap_read_folio(struct folio *folio, bool do_poll,
23 void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
35 bool add_to_swap(struct folio *folio);
37 int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
39 void __delete_from_swap_cache(struct folio *folio,
61 folio_swap_flags(struct folio *folio) argument
67 swap_read_folio(struct folio *folio, bool do_poll, struct swap_iocb **plug) argument
118 add_to_swap(struct folio *folio) argument
128 add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp_mask, void **shadowp) argument
134 __delete_from_swap_cache(struct folio *folio, swp_entry_t entry, void *shadow) argument
139 delete_from_swap_cache(struct folio *folio) argument
148 folio_swap_flags(struct folio *folio) argument
[all...]
/linux-master/include/trace/events/
pagemap.h
19 #define trace_pagemap_flags(folio) ( \
20 (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
21 (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
22 (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
23 (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
24 (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
25 (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
30 TP_PROTO(struct folio *folio),
32 TP_ARGS(folio),
[all...]
/linux-master/fs/gfs2/
aops.h
12 void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
/linux-master/fs/freevxfs/
vxfs_immed.c
19 * @folio: folio to fill in.
23 * file that hosts @folio into the pagecache.
29 * @folio is locked and will be unlocked.
31 static int vxfs_immed_read_folio(struct file *fp, struct folio *folio) argument
33 struct vxfs_inode_info *vip = VXFS_INO(folio->mapping->host);
34 void *src = vip->vii_immed.vi_immed + folio_pos(folio);
37 for (i = 0; i < folio_nr_pages(folio); i++) {
38 memcpy_to_page(folio_page(folio,
[all...]
/linux-master/fs/bcachefs/
fs-io-pagecache.h
7 typedef DARRAY(struct folio *) folios;
14 * Use u64 for the end pos and sector helpers because if the folio covers the
15 * max supported range of the mapping, the start offset of the next folio
19 static inline u64 folio_end_pos(struct folio *folio) argument
21 return folio_pos(folio) + folio_size(folio);
24 static inline size_t folio_sectors(struct folio *folio) argument
26 return PAGE_SECTORS << folio_order(folio);
29 folio_sector(struct folio *folio) argument
34 folio_end_sector(struct folio *folio) argument
72 bch2_folio_sector_set(struct folio *folio, struct bch_folio *s, unsigned i, unsigned n) argument
80 folio_pos_to_s(struct folio *folio, loff_t pos) argument
89 __bch2_folio_release(struct folio *folio) argument
94 bch2_folio_release(struct folio *folio) argument
100 __bch2_folio(struct folio *folio) argument
107 bch2_folio(struct folio *folio) argument
[all...]
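The sector helpers are pure arithmetic over the folio's position and order: with 4 KiB pages PAGE_SECTORS is 8, so an order-2 folio spans 8 << 2 = 32 sectors of 512 bytes. A sketch building on the bcachefs-local helpers above (assumes this header is in scope; the name is hypothetical):

/* Hypothetical: the last 512-byte sector covered by a folio. */
static inline u64 sketch_folio_last_sector(struct folio *folio)
{
	return folio_sector(folio) + folio_sectors(folio) - 1;
}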
/linux-master/fs/btrfs/
subpage.h
10 struct folio;
46 * For locked bitmaps, normally it's subpage representation for folio
49 * - Metadata doesn't really lock the folio
95 struct folio *folio, enum btrfs_subpage_type type);
96 void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio);
103 void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
104 void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
[all...]
subpage.c
121 struct folio *folio, enum btrfs_subpage_type type)
129 if (folio->mapping)
130 ASSERT(folio_test_locked(folio));
132 /* Either not subpage, or the folio already has private attached. */
133 if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
140 folio_attach_private(folio, subpage);
144 void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio) argument
120 btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio, enum btrfs_subpage_type type) argument
195 btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) argument
209 btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) argument
224 btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
253 btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
275 btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
309 btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len) argument
327 btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
347 btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
390 btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
407 btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
429 btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
444 btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
458 btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
482 btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
499 btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
509 btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
524 btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
541 btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
555 btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
570 btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
585 btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
706 btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio) argument
736 btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
772 btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) argument
[all...]
/linux-master/mm/damon/
paddr.c
19 static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma, argument
22 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
36 struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); local
43 if (!folio)
46 if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
47 folio_set_idle(folio);
51 need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
82 __damon_pa_young(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg) argument
116 struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); local
187 __damos_pa_filter_out(struct damos_filter *filter, struct folio *folio) argument
216 damos_pa_filter_out(struct damos *scheme, struct folio *folio) argument
233 struct folio *folio = damon_get_folio(PHYS_PFN(addr)); local
263 struct folio *folio = damon_get_folio(PHYS_PFN(addr)); local
[all...]
ops-common.c
22 struct folio *damon_get_folio(unsigned long pfn)
25 struct folio *folio; local
30 folio = page_folio(page);
31 if (!folio_test_lru(folio) || !folio_try_get(folio))
33 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
34 folio_put(folio);
35 folio = NULL;
42 struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); local
57 struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd))); local
[all...]
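damon_get_folio() repeats the same lockless pfn-to-folio dance as page_idle_get_folio() above: take a reference only while the folio looks live on an LRU, then re-check, because the page can be split or freed and reused between the lookup and the folio_try_get(). The shared pattern, consolidated as a sketch (the name is hypothetical):

#include <linux/mm.h>

/* Hypothetical: safely convert a page pointer into a pinned folio.
 * The re-check after folio_try_get() guards against the folio being
 * split or recycled between the two steps. */
static struct folio *sketch_get_live_folio(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		return NULL;
	}
	return folio;
}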
/linux-master/fs/netfs/
misc.c
12 * Attach a folio to the buffer and maybe set marks on it to say that we need
13 * to put the folio later and twiddle the pagecache flags.
16 struct folio *folio, unsigned int flags,
19 XA_STATE_ORDER(xas, xa, index, folio_order(folio));
24 xas_store(&xas, folio);
50 struct folio *folio; local
57 /* TODO: Figure out what order folio can be allocated here */
58 folio
15 netfs_xa_store_and_mark(struct xarray *xa, unsigned long index, struct folio *folio, unsigned int flags, gfp_t gfp_mask) argument
81 struct folio *folio; local
103 netfs_dirty_folio(struct address_space *mapping, struct folio *folio) argument
178 netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) argument
240 netfs_release_folio(struct folio *folio, gfp_t gfp) argument
[all...]
buffered_write.c
21 NETFS_JUST_PREFETCH, /* We have to read the folio anyway */
22 NETFS_WHOLE_FOLIO_MODIFY, /* We're going to overwrite the whole folio */
31 static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group) argument
33 if (netfs_group && !folio_get_private(folio))
34 folio_attach_private(folio, netfs_get_group(netfs_group));
38 static void netfs_folio_start_fscache(bool caching, struct folio *folio) argument
41 folio_start_fscache(folio);
44 static void netfs_folio_start_fscache(bool caching, struct folio *folio) argument
56 netfs_how_to_modify(struct netfs_inode *ctx, struct file *file, struct folio *folio, void *netfs_group, size_t flen, size_t offset, size_t len, bool maybe_trouble) argument
163 struct folio *folio; local
507 struct folio *folio = page_folio(vmf->page); local
566 struct folio *folio; local
604 struct folio *folio; local
641 struct folio *folio; local
780 struct folio *folio; local
891 netfs_write_back_from_locked_folio(struct address_space *mapping, struct writeback_control *wbc, struct netfs_group *group, struct xa_state *xas, struct folio *folio, unsigned long long start, unsigned long long end) argument
1005 struct folio *folio; local
1203 netfs_launder_folio(struct folio *folio) argument
[all...]
/linux-master/fs/coda/
symlink.c
23 static int coda_symlink_filler(struct file *file, struct folio *folio) argument
25 struct inode *inode = folio->mapping->host;
29 char *p = folio_address(folio);
36 folio_mark_uptodate(folio);
37 folio_unlock(folio);
41 folio_set_error(folio);
42 folio_unlock(folio);
/linux-master/fs/nilfs2/
page.c
28 static struct buffer_head *__nilfs_get_folio_block(struct folio *folio, argument
34 struct buffer_head *bh = folio_buffers(folio);
37 bh = create_empty_buffers(folio, 1 << blkbits, b_state);
54 struct folio *folio; local
57 folio = filemap_grab_folio(mapping, index);
58 if (IS_ERR(folio))
61 bh = __nilfs_get_folio_block(folio, blkoff, index, blkbits, b_state);
63 folio_unlock(folio);
76 struct folio *folio = bh->b_folio; local
140 nilfs_folio_buffers_clean(struct folio *folio) argument
153 nilfs_folio_bug(struct folio *folio) argument
258 struct folio *folio = fbatch.folios[i], *dfolio; local
312 struct folio *folio = fbatch.folios[i], *dfolio; local
373 struct folio *folio = fbatch.folios[i]; local
397 nilfs_clear_folio_dirty(struct folio *folio, bool silent) argument
462 __nilfs_clear_folio_dirty(struct folio *folio) argument
501 struct folio *folio; local
[all...]

Completed in 242 milliseconds
