Searched refs:folio (Results 176 - 200 of 361) sorted by relevance


/linux-master/kernel/futex/
core.c
227 struct folio *folio; local
291 * The treatment of mapping from this point on is critical. The folio
292 * lock protects many things but in this context the folio lock
296 * Strictly speaking the folio lock is not needed in all cases being
297 * considered here and folio lock forces unnecessarily serialization.
299 * folio lock will be acquired only if it is unavoidable
301 * Mapping checks require the folio so it is looked up now. For
302 * anonymous pages, it does not matter if the folio is split
307 folio
[all...]
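The futex hit is mostly the comment block in get_futex_key() explaining why the code resolves the folio before inspecting the mapping, and takes the folio lock only when unavoidable. A heavily simplified, hedged sketch of that anonymous-vs-file decision (not the actual futex code, which also handles races and retries):

    struct folio *folio = page_folio(page);
    struct address_space *mapping = READ_ONCE(folio->mapping);

    if (folio_test_anon(folio)) {
            /* anonymous memory: the key is derived from the mm + address */
    } else {
            /*
             * File-backed: re-check the mapping under the folio lock
             * before building the inode-based key.
             */
            folio_lock(folio);
            if (mapping && folio->mapping == mapping) {
                    /* build the key from mapping->host and the index */
            }
            folio_unlock(folio);
    }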
/linux-master/fs/ceph/
addr.c
67 struct folio **foliop, void **_fsdata);
80 static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio) argument
87 if (folio_test_dirty(folio)) {
89 ceph_vinop(inode), folio, folio->index);
90 VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
116 ceph_vinop(inode), folio, folio
137 ceph_invalidate_folio(struct folio *folio, size_t offset, size_t length) argument
643 struct folio *folio = page_folio(page); local
1061 struct folio *folio = page_folio(page); local
1509 struct folio *folio = NULL; local
1530 struct folio *folio = page_folio(subpage); local
1839 struct folio *folio = NULL; local
[all...]
/linux-master/include/linux/
shmem_fs.h
144 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
146 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
149 static inline struct folio *shmem_read_folio(struct address_space *mapping,
193 struct folio **foliop);
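The shmem_fs.h hits are the folio-based lookup/read helpers. A minimal, hedged usage sketch of shmem_read_folio(), assuming the caller already holds a reference on the shmem inode and that buf/len are its own:

    struct folio *folio;
    void *kaddr;

    folio = shmem_read_folio(inode->i_mapping, index);
    if (IS_ERR(folio))
            return PTR_ERR(folio);

    /* map only the first page of the folio and copy out of it */
    kaddr = kmap_local_folio(folio, 0);
    memcpy(buf, kaddr, len);        /* len <= PAGE_SIZE assumed */
    kunmap_local(kaddr);
    folio_put(folio);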
gfp.h
180 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
242 struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
268 struct folio *folio_alloc(gfp_t gfp, unsigned int order);
269 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
281 static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
292 struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false); local
294 return &folio->page;
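gfp.h is where the folio allocators live; the last hit is the order-0 fallback that hands back &folio->page. A hedged allocation/free sketch using the public helpers:

    struct folio *folio;

    /* ask for a 4-page (order-2), zeroed folio */
    folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 2);
    if (!folio)
            return -ENOMEM;

    /* GFP_KERNEL memory is directly mapped, so folio_address() is valid */
    memset(folio_address(folio), 0xaa, folio_size(folio));

    folio_put(folio);       /* drop the allocation reference, freeing it */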
fscrypt.h
320 int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
336 static inline bool fscrypt_is_bounce_folio(struct folio *folio) argument
338 return folio->mapping == NULL;
341 static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio)
499 static inline int fscrypt_decrypt_pagecache_blocks(struct folio *folio, argument
524 static inline bool fscrypt_is_bounce_folio(struct folio *foli argument
[all...]
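The fscrypt hits show that a bounce folio is recognised purely by folio->mapping == NULL, and that fscrypt_pagecache_folio() maps it back to the pagecache folio. A hedged sketch of how a write-completion path might use the pair (simplified, not any particular filesystem's code):

    if (fscrypt_is_bounce_folio(folio)) {
            struct folio *pagecache = fscrypt_pagecache_folio(folio);

            /* end writeback on the real folio, then free the bounce copy */
            folio_end_writeback(pagecache);
            fscrypt_free_bounce_page(folio_page(folio, 0));
    } else {
            folio_end_writeback(folio);
    }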
/linux-master/arch/mips/mm/
init.c
173 struct folio *src = page_folio(from);
199 struct folio *folio = page_folio(page); local
202 folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
209 folio_set_dcache_dirty(folio);
219 struct folio *folio = page_folio(page); local
222 folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
[all...]
/linux-master/include/trace/events/
erofs.h
85 TP_PROTO(struct folio *folio, bool raw),
87 TP_ARGS(folio, raw),
99 __entry->dev = folio->mapping->host->i_sb->s_dev;
100 __entry->nid = EROFS_I(folio->mapping->host)->nid;
101 __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
102 __entry->index = folio->index;
103 __entry->uptodate = folio_test_uptodate(folio);
/linux-master/arch/parisc/include/asm/
cacheflush.h
47 void flush_dcache_folio(struct folio *folio);
/linux-master/arch/sh/include/asm/
cacheflush.h
16 * - flush_dcache_folio(folio) flushes(wback&invalidates) a folio for dcache
45 void flush_dcache_folio(struct folio *folio);
/linux-master/fs/btrfs/
defrag.c
863 static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t index)
870 struct folio *folio; local
874 folio = __filemap_get_folio(mapping, index,
876 if (IS_ERR(folio))
877 return folio;
887 if (folio_test_large(folio)) {
888 folio_unlock(folio);
889 folio_put(folio);
893 ret = set_folio_extent_mapped(folio);
[all...]
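The defrag hit is the standard lookup-or-create-and-lock pattern around __filemap_get_folio(), plus a bail-out for large folios. Stripped of the btrfs specifics, the shape is roughly the following hedged sketch (generic names, function assumed to return a struct folio *):

    struct folio *folio;

    folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
                                mapping_gfp_mask(mapping));
    if (IS_ERR(folio))
            return folio;                   /* ERR_PTR on failure */

    if (folio_test_large(folio)) {          /* caller only handles order-0 */
            folio_unlock(folio);
            folio_put(folio);
            return ERR_PTR(-EAGAIN);
    }

    /* ... work on the locked folio ... */
    folio_unlock(folio);
    folio_put(folio);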
/linux-master/fs/xfs/scrub/
xfarray.h
127 /* Cache a folio here for faster scanning for pivots */
128 struct folio *folio; member in struct:xfarray_sortinfo
130 /* First array index in folio that is completely readable */
133 /* Last array index in folio that is completely readable */
xfarray.c
568 * same memory folio. Returns 1 if it sorted, 0 if it did not, or a negative
577 struct folio *folio; local
582 /* No single folio could back this many records. */
587 folio = xfile_get_folio(si->array->xfile, lo_pos, len, XFILE_ALLOC);
588 if (IS_ERR(folio))
589 return PTR_ERR(folio);
590 if (!folio)
596 startp = folio_address(folio) + offset_in_folio(folio, lo_po
[all...]
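The xfarray hits centre on the "does the whole range fit in one folio" optimisation: once xfile_get_folio() hands back a folio covering the range, element addresses are plain pointer arithmetic. The addressing step, as a short hedged sketch:

    /* byte position pos lies inside folio, so its kernel address is: */
    void *p = folio_address(folio) + offset_in_folio(folio, pos);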
/linux-master/drivers/misc/lkdtm/
usercopy.c
405 struct folio *folio; local
412 folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
413 if (!folio) {
417 addr = folio_address(folio);
419 do_usercopy_page_span("folio", addr + PAGE_SIZE);
422 folio_put(folio);
/linux-master/mm/
mincore.c
55 struct folio *folio; local
63 folio = filemap_get_incore_folio(mapping, index);
64 if (!IS_ERR(folio)) {
65 present = folio_test_uptodate(folio);
66 folio_put(folio);
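mincore's helper asks the mm-internal, swap-aware lookup whether an up-to-date copy is resident. With the generic pagecache API instead of filemap_get_incore_folio(), the same check looks roughly like this hedged sketch (it will not see swapped-out shmem the way the original does):

    struct folio *folio = filemap_get_folio(mapping, index);
    bool present = false;

    if (!IS_ERR(folio)) {
            present = folio_test_uptodate(folio);
            folio_put(folio);
    }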
swap_slots.c
276 /* Large folio swap slot is not covered. */
305 swp_entry_t folio_alloc_swap(struct folio *folio) argument
312 if (folio_test_large(folio)) {
314 get_swap_pages(1, &entry, folio_nr_pages(folio));
348 if (mem_cgroup_try_charge_swap(folio, entry)) {
349 put_swap_folio(folio, entry);
util.c
767 struct anon_vma *folio_anon_vma(struct folio *folio) argument
769 unsigned long mapping = (unsigned long)folio->mapping;
777 * folio_mapping - Find the mapping where this folio is stored.
778 * @folio: The folio.
788 struct address_space *folio_mapping(struct folio *folio) argument
793 if (unlikely(folio_test_slab(folio)))
796 if (unlikely(folio_test_swapcache(folio)))
1156 flush_dcache_folio(struct folio *folio) argument
[all...]
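folio_mapping() is the careful accessor the util.c hits document: it hides the slab and swapcache special cases and returns NULL for anonymous folios. A hedged usage sketch:

    struct address_space *mapping = folio_mapping(folio);

    if (!mapping)
            return;                 /* anonymous or otherwise unmapped folio */

    /* file-backed: the host inode is reachable through the mapping */
    pr_debug("folio belongs to inode %lu\n", mapping->host->i_ino);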
mempolicy.c
457 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
484 struct folio *large; /* note last large folio encountered */
489 * Check if the folio's nid is in qp->nmask.
494 static inline bool queue_folio_required(struct folio *folio, argument
497 int nid = folio_nid(folio);
505 struct folio *folio; local
539 struct folio *folio; local
622 struct folio *folio; local
1028 migrate_folio_add(struct folio *folio, struct list_head *foliolist, unsigned long flags) argument
1243 migrate_folio_add(struct folio *folio, struct list_head *foliolist, unsigned long flags) argument
1359 struct folio *folio; local
2731 mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, unsigned long addr) argument
[all...]
mprotect.c
117 struct folio *folio; local
125 folio = vm_normal_folio(vma, addr, oldpte);
126 if (!folio || folio_is_zone_device(folio) ||
127 folio_test_ksm(folio))
132 folio_ref_count(folio) != 1)
140 if (folio_is_file_lru(folio) &&
141 folio_test_dirty(folio))
148 nid = folio_nid(folio);
201 struct folio *folio = pfn_swap_entry_folio(entry); local
[all...]
gup_test.c
43 struct folio *folio; local
50 folio = page_folio(pages[i]);
52 if (WARN(!folio_maybe_dma_pinned(folio),
55 dump_page(&folio->page, "gup_test failure");
58 WARN(!folio_is_longterm_pinnable(folio),
61 dump_page(&folio->page, "gup_test failure");
page_isolation.c
82 struct folio *folio = page_folio(page); local
86 if (!hugepage_migration_supported(folio_hstate(folio)))
88 } else if (!folio_test_lru(folio) && !__folio_test_movable(folio)) {
92 skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
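The page_isolation hit shows how a pfn walker skips the rest of a large folio once any constituent page is found unmovable. The arithmetic, as a hedged sketch:

    /* pages of this folio remaining at or after 'page' */
    unsigned long skip = folio_nr_pages(folio) - folio_page_idx(folio, page);

    pfn += skip;    /* resume the scan past the folio */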
/linux-master/arch/arm/mm/
fault-armv.c
188 struct folio *folio; local
200 folio = page_folio(pfn_to_page(pfn));
201 mapping = folio_flush_mapping(folio);
202 if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
203 __flush_dcache_folio(mapping, folio);
/linux-master/fs/jfs/
jfs_metapage.c
470 static int metapage_read_folio(struct file *fp, struct folio *folio) argument
472 struct page *page = &folio->page;
527 static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask) argument
534 mp = page_to_mp(&folio->page, offset);
549 remove_metapage(&folio->page, mp);
556 static void metapage_invalidate_folio(struct folio *folio, size_t offset, argument
559 BUG_ON(offset || length < folio_size(folio));
696 struct folio *folio = page_folio(page); local
[all...]
/linux-master/fs/reiserfs/
inode.c
2506 static int reiserfs_write_full_folio(struct folio *folio, argument
2509 struct inode *inode = folio->mapping->host;
2517 int checked = folio_test_checked(folio);
2525 folio_redirty_for_writepage(wbc, folio);
2526 folio_unlock(folio);
2531 * The folio dirty bit is cleared before writepage is called, which
2533 * The folio really should be up to date at this point, so tossing
2536 head = folio_buffers(folio);
2538 head = create_empty_buffers(folio,
2719 reiserfs_read_folio(struct file *f, struct folio *folio) argument
2726 struct folio *folio = page_folio(page); local
2869 struct folio *folio = page_folio(page); local
3126 reiserfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) argument
3179 reiserfs_dirty_folio(struct address_space *mapping, struct folio *folio) argument
3198 reiserfs_release_folio(struct folio *folio, gfp_t unused_gfp_flags) argument
[all...]
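reiserfs_write_full_folio() starts with the classic buffer_head bootstrap visible in the hit: take the folio's buffers and create an empty set if none exist yet. As a hedged sketch (create_empty_buffers() returns the head in current kernels):

    struct buffer_head *head = folio_buffers(folio);

    if (!head)
            head = create_empty_buffers(folio,
                                        i_blocksize(folio->mapping->host), 0);
    /* ... walk the buffer ring starting at head ... */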
/linux-master/fs/nfs/
fscache.c
244 int nfs_netfs_read_folio(struct file *file, struct folio *folio) argument
246 if (!netfs_inode(folio_inode(folio))->cache)
249 return netfs_read_folio(file, folio);
342 int nfs_netfs_folio_unlock(struct folio *folio) argument
344 struct inode *inode = folio_file_mapping(folio)->host;
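The NFS hook simply declines when no fscache cookie is attached, so the caller falls back to its ordinary read path, and otherwise delegates to netfs. A hedged sketch of the same shape for a hypothetical filesystem (the function name is illustrative):

    static int example_read_folio(struct file *file, struct folio *folio)
    {
            /* no cache attached: tell the caller to use its normal read path */
            if (!netfs_inode(folio_inode(folio))->cache)
                    return -ENOBUFS;

            return netfs_read_folio(file, folio);
    }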
/linux-master/fs/bcachefs/
fs-io.c
249 struct folio *folio; local
254 folio = filemap_lock_folio(mapping, index);
255 if (IS_ERR_OR_NULL(folio)) {
258 * folio
266 folio = __filemap_get_folio(mapping, index,
268 if (IS_ERR_OR_NULL(folio)) {
274 BUG_ON(start >= folio_end_pos(folio));
275 BUG_ON(end <= folio_pos(folio));
277 start_offset = max(start, folio_pos(folio))
[all...]
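The bcachefs truncate helper first tries filemap_lock_folio() and only creates a folio when one must exist, then clamps the byte range to the part the folio covers. Using only generic helpers (folio_pos()/folio_size()) rather than the bcachefs-specific folio_end_pos(), the clamping step looks roughly like this hedged sketch:

    u64 fstart = max_t(u64, start, folio_pos(folio));
    u64 fend   = min_t(u64, end, folio_pos(folio) + folio_size(folio));
    unsigned offset = fstart - folio_pos(folio);    /* offset into the folio */
    unsigned len    = fend - fstart;                /* bytes to operate on */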

