Lines matching refs: folio

210 				   folio contents */
379 * Large folio support currently depends on THP. These dependencies are
386 "Anonymous mapping always supports large folio");
392 /* Return the maximum folio size for this pagecache mapping, in bytes. */
430 struct address_space *folio_mapping(struct folio *);
431 struct address_space *swapcache_mapping(struct folio *);
434 * folio_file_mapping - Find the mapping this folio belongs to.
435 * @folio: The folio.
445 static inline struct address_space *folio_file_mapping(struct folio *folio)
447 if (unlikely(folio_test_swapcache(folio)))
448 return swapcache_mapping(folio);
450 return folio->mapping;
454 * folio_flush_mapping - Find the file mapping this folio belongs to.
455 * @folio: The folio.
459 * the swap cache. Other kinds of folio also return NULL.
465 static inline struct address_space *folio_flush_mapping(struct folio *folio)
467 if (unlikely(folio_test_swapcache(folio)))
470 return folio_mapping(folio);
479 * folio_inode - Get the host inode for this folio.
480 * @folio: The folio.
482 * For folios which are in the page cache, return the inode that this folio
487 static inline struct inode *folio_inode(struct folio *folio)
489 return folio->mapping->host;
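The mapping/inode helpers above are straightforward accessors; a minimal sketch of how they are typically combined (the helper name fs_example_host() is invented, not from the header):

	#include <linux/pagemap.h>

	/* Hypothetical helper: host inode for a page-cache folio, or NULL. */
	static struct inode *fs_example_host(struct folio *folio)
	{
		/* Swap-cache folios have no host inode to report. */
		if (unlikely(folio_test_swapcache(folio)))
			return NULL;

		return folio_inode(folio);	/* folio->mapping->host */
	}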
493 * folio_attach_private - Attach private data to a folio.
494 * @folio: Folio to attach data to.
495 * @data: Data to attach to folio.
497 * Attaching private data to a folio increments the page's reference count.
498 * The data must be detached before the folio will be freed.
500 static inline void folio_attach_private(struct folio *folio, void *data)
502 folio_get(folio);
503 folio->private = data;
504 folio_set_private(folio);
508 * folio_change_private - Change private data on a folio.
509 * @folio: Folio to change the data on.
510 * @data: Data to set on the folio.
512 * Change the private data attached to a folio and return the old
514 * must be detached before the folio will be freed.
516 * Return: Data that was previously attached to the folio.
518 static inline void *folio_change_private(struct folio *folio, void *data)
520 void *old = folio_get_private(folio);
522 folio->private = data;
527 * folio_detach_private - Detach private data from a folio.
528 * @folio: Folio to detach data from.
530 * Removes the data that was previously attached to the folio and decrements
533 * Return: Data that was attached to the folio.
535 static inline void *folio_detach_private(struct folio *folio)
537 void *data = folio_get_private(folio);
539 if (!folio_test_private(folio))
541 folio_clear_private(folio);
542 folio->private = NULL;
543 folio_put(folio);
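A hedged sketch of the attach/detach pairing described above; struct fs_private and both function names are invented for illustration:

	#include <linux/pagemap.h>
	#include <linux/slab.h>

	struct fs_private {			/* hypothetical per-folio state */
		unsigned long flags;
	};

	static int fs_example_attach(struct folio *folio)
	{
		struct fs_private *fp = kzalloc(sizeof(*fp), GFP_KERNEL);

		if (!fp)
			return -ENOMEM;
		folio_attach_private(folio, fp);	/* takes a folio reference */
		return 0;
	}

	static void fs_example_detach(struct folio *folio)
	{
		struct fs_private *fp = folio_detach_private(folio);

		kfree(fp);				/* reference dropped by detach */
	}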
559 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
561 static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
580 typedef int filler_t(struct file *, struct folio *);
596 * * %FGP_ACCESSED - The folio will be marked accessed.
597 * * %FGP_LOCK - The folio is returned locked.
598 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
599 * added to the page cache and the VM's LRU list. The folio is
602 * folio is already in cache. If the folio was allocated, unlock it
604 * * %FGP_WRITE - The folio will be written to by the caller.
606 * * %FGP_NOWAIT - Don't block on the folio lock.
607 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
627 * @size: The suggested size of the folio to create.
630 * size for the folio that is created. If there is already a folio at
631 * the index, it will be returned, no matter what its size. If a folio
646 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
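As a rough illustration of the flags documented above (a sketch under assumptions, not code from the header), a lookup that creates, locks and marks a folio accessed might look like this; on recent kernels a failure is reported as an ERR_PTR rather than NULL:

	#include <linux/pagemap.h>

	static struct folio *fs_example_grab_locked(struct address_space *mapping,
						    pgoff_t index)
	{
		/* FGP_CREAT: allocate if absent; FGP_LOCK: return it locked. */
		return __filemap_get_folio(mapping, index,
					   FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					   mapping_gfp_mask(mapping));
	}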
652 * filemap_get_folio - Find and get a folio.
656 * Looks up the page cache entry at @mapping & @index. If a folio is
659 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
662 static inline struct folio *filemap_get_folio(struct address_space *mapping,
669 * filemap_lock_folio - Find and lock a folio.
673 * Looks up the page cache entry at @mapping & @index. If a folio is
677 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
680 static inline struct folio *filemap_lock_folio(struct address_space *mapping,
687 * filemap_grab_folio - grab a folio from the page cache
691 * Looks up the page cache entry at @mapping & @index. If no folio is found,
692 * a new folio is created. The folio is locked, marked as accessed, and
695 * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
696 * and failed to create a folio.
698 static inline struct folio *filemap_grab_folio(struct address_space *mapping,
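A short sketch contrasting the plain lookup with its locking and creating variants; the helper is hypothetical and the error handling follows the ERR_PTR returns documented above:

	#include <linux/pagemap.h>

	/* Hypothetical helper: use the cached folio at @index if one exists. */
	static int fs_example_peek(struct address_space *mapping, pgoff_t index)
	{
		struct folio *folio = filemap_get_folio(mapping, index);

		if (IS_ERR(folio))
			return PTR_ERR(folio);		/* -ENOENT: not cached */

		/* ... read from the folio here ... */
		folio_put(folio);			/* drop our reference */
		return 0;
	}

	/*
	 * filemap_lock_folio() is the same lookup but returns the folio locked;
	 * filemap_grab_folio() additionally allocates one when none is cached
	 * and returns ERR_PTR(-ENOMEM) if that allocation fails.
	 */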
795 #define swapcache_index(folio) __page_file_index(&(folio)->page)
798 * folio_index - File index of a folio.
799 * @folio: The folio.
801 * For a folio which is either in the page cache or the swap cache,
803 * the page is definitely in the page cache, you can look at the folio's
806 * Return: The index (offset in units of pages) of a folio in its file.
808 static inline pgoff_t folio_index(struct folio *folio)
810 if (unlikely(folio_test_swapcache(folio)))
811 return swapcache_index(folio);
812 return folio->index;
816 * folio_next_index - Get the index of the next folio.
817 * @folio: The current folio.
819 * Return: The index of the folio which follows this folio in the file.
821 static inline pgoff_t folio_next_index(struct folio *folio)
823 return folio->index + folio_nr_pages(folio);
828 * @folio: The folio which contains this index.
831 * Sometimes after looking up a folio in the page cache, we need to
836 static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
838 return folio_page(folio, index & (folio_nr_pages(folio) - 1));
842 * folio_contains - Does this folio contain this index?
843 * @folio: The folio.
851 static inline bool folio_contains(struct folio *folio, pgoff_t index)
853 return index - folio_index(folio) < folio_nr_pages(folio);
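The index helpers above are typically combined when walking a range of the file; a hedged sketch (function name invented, a hole in the cache simply ends the walk):

	#include <linux/pagemap.h>

	/* Hypothetical walk over the cached folios covering [index, end]. */
	static void fs_example_walk(struct address_space *mapping,
				    pgoff_t index, pgoff_t end)
	{
		while (index <= end) {
			struct folio *folio = filemap_get_folio(mapping, index);

			if (IS_ERR(folio))
				break;			/* hole in the cache */

			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);

			/* ... operate on folio_file_page(folio, index) ... */

			index = folio_next_index(folio); /* skip the whole folio */
			folio_put(folio);
		}
	}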
888 struct folio *read_cache_folio(struct address_space *, pgoff_t index,
890 struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
903 static inline struct folio *read_mapping_folio(struct address_space *mapping,
941 * folio_pos - Returns the byte position of this folio in its file.
942 * @folio: The folio.
944 static inline loff_t folio_pos(struct folio *folio)
946 return page_offset(&folio->page);
950 * folio_file_pos - Returns the byte position of this folio in its file.
951 * @folio: The folio.
956 static inline loff_t folio_file_pos(struct folio *folio)
958 return page_file_offset(&folio->page);
964 static inline pgoff_t folio_pgoff(struct folio *folio)
966 return folio->index;
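A small sketch of converting folio coordinates to byte offsets (not from the header; the helper is hypothetical):

	#include <linux/pagemap.h>

	/* Byte range [*start, *end) of the file covered by this folio. */
	static void fs_example_folio_range(struct folio *folio,
					   loff_t *start, loff_t *end)
	{
		*start = folio_pos(folio);
		*end = *start + folio_size(folio);
	}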
979 struct folio *folio;
985 struct folio *folio;
993 if (wait_page->folio != key->folio)
1003 void __folio_lock(struct folio *folio);
1004 int __folio_lock_killable(struct folio *folio);
1005 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
1007 void folio_unlock(struct folio *folio);
1010 * folio_trylock() - Attempt to lock a folio.
1011 * @folio: The folio to attempt to lock.
1013 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
1021 static inline bool folio_trylock(struct folio *folio)
1023 return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
1035 * folio_lock() - Lock this folio.
1036 * @folio: The folio to lock.
1038 * The folio lock protects against many things, probably more than it
1039 * should. It is primarily held while a folio is being brought uptodate,
1041 * folio is being truncated from its address_space, so holding the lock
1042 * is sufficient to keep folio->mapping stable.
1044 * The folio lock is also held while write() is modifying the page to
1046 * cross a page boundary). Other modifications to the data in the folio
1047 * do not hold the folio lock and can race with writes, eg DMA and stores
1053 * acquire the lock of the folio which belongs to the address_space which
1056 static inline void folio_lock(struct folio *folio)
1059 if (!folio_trylock(folio))
1060 __folio_lock(folio);
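A sketch of the lock-then-recheck pattern the comment above alludes to; it assumes the caller already holds a reference on the folio from its lookup:

	#include <linux/pagemap.h>

	/* Lock @folio and confirm it still belongs to @mapping. */
	static bool fs_example_lock_and_check(struct folio *folio,
					      struct address_space *mapping)
	{
		folio_lock(folio);			/* may sleep */
		if (folio->mapping != mapping) {
			/* Truncated or migrated while we slept. */
			folio_unlock(folio);
			return false;
		}
		return true;
	}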
1064 * lock_page() - Lock the folio containing this page.
1071 * Context: May sleep. Pages in the same folio share a lock, so do not
1072 * attempt to lock two pages which share a folio.
1076 struct folio *folio;
1079 folio = page_folio(page);
1080 if (!folio_trylock(folio))
1081 __folio_lock(folio);
1085 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
1086 * @folio: The folio to lock.
1088 * Attempts to lock the folio, like folio_lock(), except that the sleep
1094 static inline int folio_lock_killable(struct folio *folio)
1097 if (!folio_trylock(folio))
1098 return __folio_lock_killable(folio);
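The killable variant is for paths where a fatal signal should abort the wait; a minimal sketch (function name invented):

	#include <linux/pagemap.h>

	static int fs_example_lock_killable(struct folio *folio)
	{
		int err = folio_lock_killable(folio);

		if (err)
			return err;		/* -EINTR: fatal signal received */

		/* ... the folio is locked here ... */
		folio_unlock(folio);
		return 0;
	}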
1103 * folio_lock_or_retry - Lock the folio, unless this would block and the
1109 static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
1113 if (!folio_trylock(folio))
1114 return __folio_lock_or_retry(folio, vmf);
1122 void folio_wait_bit(struct folio *folio, int bit_nr);
1123 int folio_wait_bit_killable(struct folio *folio, int bit_nr);
1126 * Wait for a folio to be unlocked.
1128 * This must be called with the caller "holding" the folio,
1129 * ie with increased folio reference count so that the folio won't
1132 static inline void folio_wait_locked(struct folio *folio)
1134 if (folio_test_locked(folio))
1135 folio_wait_bit(folio, PG_locked);
1138 static inline int folio_wait_locked_killable(struct folio *folio)
1140 if (!folio_test_locked(folio))
1142 return folio_wait_bit_killable(folio, PG_locked);
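As the comment above notes, the waiter must already hold a reference (for example from filemap_get_folio()) so the folio cannot be freed while it sleeps; a brief sketch:

	#include <linux/pagemap.h>

	/* Caller must hold a folio reference from its earlier lookup. */
	static void fs_example_wait_unlocked(struct folio *folio)
	{
		folio_wait_locked(folio);
		/* Not a lock: the folio may already be locked again here. */
	}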
1150 void folio_end_read(struct folio *folio, bool success);
1152 void folio_wait_writeback(struct folio *folio);
1153 int folio_wait_writeback_killable(struct folio *folio);
1155 void folio_end_writeback(struct folio *folio);
1157 void folio_wait_stable(struct folio *folio);
1158 void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
1159 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
1160 void __folio_cancel_dirty(struct folio *folio);
1161 static inline void folio_cancel_dirty(struct folio *folio)
1164 if (folio_test_dirty(folio))
1165 __folio_cancel_dirty(folio);
1167 bool folio_clear_dirty_for_io(struct folio *folio);
1169 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
1170 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
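One common user of folio_cancel_dirty() is an ->invalidate_folio handler that throws away a wholly invalidated folio; the sketch below is illustrative only and the handler name is hypothetical:

	#include <linux/pagemap.h>

	/* Hypothetical address_space_operations ->invalidate_folio handler. */
	static void fs_example_invalidate_folio(struct folio *folio,
						size_t offset, size_t length)
	{
		/* Whole folio invalidated: its dirtiness no longer matters. */
		if (offset == 0 && length == folio_size(folio))
			folio_cancel_dirty(folio);
	}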
1173 int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
1174 struct folio *src, enum migrate_mode mode);
1178 void folio_end_private_2(struct folio *folio);
1179 void folio_wait_private_2(struct folio *folio);
1180 int folio_wait_private_2_killable(struct folio *folio);
1185 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
1197 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
1199 void filemap_remove_folio(struct folio *folio);
1200 void __filemap_remove_folio(struct folio *folio, void *shadow);
1201 void replace_page_cache_folio(struct folio *old, struct folio *new);
1204 bool filemap_release_folio(struct folio *folio, gfp_t gfp);
1209 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
1282 void page_cache_async_ra(struct readahead_control *, struct folio *,
1314 * @folio: The folio at @index which triggered the readahead call.
1326 struct folio *folio, pgoff_t index, unsigned long req_count)
1329 page_cache_async_ra(&ractl, folio, req_count);
1332 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
1334 struct folio *folio;
1345 folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
1346 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1347 ractl->_batch_count = folio_nr_pages(folio);
1349 return folio;
1363 struct folio *folio = __readahead_folio(ractl);
1365 return &folio->page;
1369 * readahead_folio - Get the next folio to read.
1372 * Context: The folio is locked. The caller should unlock the folio once
1373 * all I/O to that folio has completed.
1374 * Return: A pointer to the next folio, or %NULL if we are done.
1376 static inline struct folio *readahead_folio(struct readahead_control *ractl)
1378 struct folio *folio = __readahead_folio(ractl);
1380 if (folio)
1381 folio_put(folio);
1382 return folio;
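A sketch of the loop shape a ->readahead implementation typically builds around readahead_folio(); the actual read is elided and success is simply assumed here:

	#include <linux/pagemap.h>

	/* Hypothetical address_space_operations ->readahead handler. */
	static void fs_example_readahead(struct readahead_control *ractl)
	{
		struct folio *folio;

		while ((folio = readahead_folio(ractl)) != NULL) {
			/* ... read the data for this folio ... */

			/*
			 * readahead_folio() already dropped our reference; the
			 * page cache keeps the folio alive. Mark it uptodate
			 * and unlock it in one step (success assumed).
			 */
			folio_end_read(folio, true);
		}
	}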
1480 * folio_mkwrite_check_truncate - check if folio was truncated
1481 * @folio: the folio to check
1482 * @inode: the inode to check the folio against
1484 * Return: the number of bytes in the folio up to EOF,
1485 * or -EFAULT if the folio was truncated.
1487 static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
1492 size_t offset = offset_in_folio(folio, size);
1494 if (!folio->mapping)
1497 /* folio is wholly inside EOF */
1498 if (folio_next_index(folio) - 1 < index)
1499 return folio_size(folio);
1500 /* folio is wholly past EOF */
1501 if (folio->index > index || !offset)
1503 /* folio is partially inside EOF */
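A sketch of how a ->page_mkwrite handler typically uses this check; the handler name is hypothetical, and it follows the common convention of returning VM_FAULT_LOCKED with the folio still locked on success:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Hypothetical vm_operations_struct ->page_mkwrite handler. */
	static vm_fault_t fs_example_page_mkwrite(struct vm_fault *vmf)
	{
		struct folio *folio = page_folio(vmf->page);
		struct inode *inode = file_inode(vmf->vma->vm_file);
		ssize_t len;

		folio_lock(folio);
		len = folio_mkwrite_check_truncate(folio, inode);
		if (len < 0) {
			folio_unlock(folio);
			return VM_FAULT_NOPAGE;	/* truncated under us */
		}

		/* ... make the first @len bytes writable/dirty ... */
		folio_mark_dirty(folio);
		return VM_FAULT_LOCKED;
	}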
1536 * i_blocks_per_folio - How many blocks fit in this folio.
1538 * @folio: The folio.
1540 * If the block size is larger than the size of this folio, return zero.
1542 * Context: The caller should hold a refcount on the folio to prevent it
1544 * Return: The number of filesystem blocks covered by this folio.
1547 unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
1549 return folio_size(folio) >> inode->i_blkbits;
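A brief sketch of the per-block iteration this helper enables; block handling is elided and the function name is invented:

	#include <linux/pagemap.h>

	static void fs_example_for_each_block(struct inode *inode,
					      struct folio *folio)
	{
		unsigned int i, nblocks = i_blocks_per_folio(inode, folio);

		for (i = 0; i < nblocks; i++) {
			/* ... look up and handle filesystem block i ... */
		}
	}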