Lines Matching defs:folio (mm/filemap.c)

137 				   struct folio *folio, void *shadow)
139 XA_STATE(xas, &mapping->i_pages, folio->index);
144 xas_set_order(&xas, folio->index, folio_order(folio));
145 nr = folio_nr_pages(folio);
147 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
152 folio->mapping = NULL;
158 struct folio *folio)
162 VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
163 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
165 current->comm, folio_pfn(folio));
166 dump_page(&folio->page, "still mapped when deleted");
170 if (mapping_exiting(mapping) && !folio_test_large(folio)) {
171 int mapcount = page_mapcount(&folio->page);
173 if (folio_ref_count(folio) >= mapcount + 2) {
180 page_mapcount_reset(&folio->page);
181 folio_ref_sub(folio, mapcount);
187 if (folio_test_hugetlb(folio))
190 nr = folio_nr_pages(folio);
192 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
193 if (folio_test_swapbacked(folio)) {
194 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
195 if (folio_test_pmd_mappable(folio))
196 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
197 } else if (folio_test_pmd_mappable(folio)) {
198 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
203 * At this point folio must be either written or cleaned by
204 * truncate. Dirty folio here signals a bug and loss of
211 * Below fixes dirty accounting after removing the folio entirely
213 * folio and anyway will be cleared before returning folio to
216 if (WARN_ON_ONCE(folio_test_dirty(folio) &&
218 folio_account_cleaned(folio, inode_to_wb(mapping->host));
226 void __filemap_remove_folio(struct folio *folio, void *shadow)
228 struct address_space *mapping = folio->mapping;
230 trace_mm_filemap_delete_from_page_cache(folio);
231 filemap_unaccount_folio(mapping, folio);
232 page_cache_delete(mapping, folio, shadow);
235 void filemap_free_folio(struct address_space *mapping, struct folio *folio)
237 void (*free_folio)(struct folio *);
242 free_folio(folio);
244 if (folio_test_large(folio))
245 refs = folio_nr_pages(folio);
246 folio_put_refs(folio, refs);
250 * filemap_remove_folio - Remove folio from page cache.
251 * @folio: The folio.
254 * verified to be in the page cache. It will never put the folio into
257 void filemap_remove_folio(struct folio *folio)
259 struct address_space *mapping = folio->mapping;
261 BUG_ON(!folio_test_locked(folio));
264 __filemap_remove_folio(folio, NULL);
270 filemap_free_folio(mapping, folio);
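
The matches above cover filemap_remove_folio(), which per its kernel-doc expects a locked folio already known to be in the page cache. A minimal caller sketch against these APIs; example_drop_folio() and its lookup/locking flow are hypothetical, not taken from this file:

#include <linux/err.h>
#include <linux/pagemap.h>

/* Hypothetical helper: drop the cached folio at @index, if any. */
static void example_drop_folio(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);

	if (IS_ERR(folio))			/* nothing cached at @index */
		return;

	folio_lock(folio);
	if (folio->mapping == mapping)		/* not truncated meanwhile */
		filemap_remove_folio(folio);
	folio_unlock(folio);
	folio_put(folio);			/* drop the lookup reference */
}
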
292 struct folio *folio;
295 xas_for_each(&xas, folio, ULONG_MAX) {
300 if (xa_is_value(folio))
309 if (folio != fbatch->folios[i]) {
310 VM_BUG_ON_FOLIO(folio->index >
311 fbatch->folios[i]->index, folio);
315 WARN_ON_ONCE(!folio_test_locked(folio));
317 folio->mapping = NULL;
318 /* Leave folio->index set: truncation lookup relies on it */
322 total_pages += folio_nr_pages(folio);
338 struct folio *folio = fbatch->folios[i];
340 trace_mm_filemap_delete_from_page_cache(folio);
341 filemap_unaccount_folio(mapping, folio);
482 struct folio *folio;
491 folio = xas_find(&xas, max);
492 if (xas_retry(&xas, folio))
495 if (xa_is_value(folio))
506 return folio != NULL;
530 struct folio *folio = fbatch.folios[i];
532 folio_wait_writeback(folio);
533 folio_clear_error(folio);
643 struct folio *folio;
649 xas_for_each(&xas, folio, max) {
650 if (xas_retry(&xas, folio))
652 if (xa_is_value(folio))
654 if (folio_test_dirty(folio) || folio_test_locked(folio) ||
655 folio_test_writeback(folio))
659 return folio != NULL;
802 * replace_page_cache_folio - replace a pagecache folio with a new one
803 * @old: folio to be replaced
804 * @new: folio to replace with
806 * This function replaces a folio in the pagecache with a new one. On
807 * success it acquires the pagecache reference for the new folio and
808 * drops it for the old folio. Both the old and new folios must be
809 * locked. This function does not add the new folio to the LRU, the
814 void replace_page_cache_folio(struct folio *old, struct folio *new)
817 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
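
replace_page_cache_folio() above requires both folios to be locked and leaves LRU insertion to the caller. A hedged sketch of that contract, assuming @old is locked and still in the page cache; example_replace() and its allocation strategy are hypothetical:

#include <linux/pagemap.h>
#include <linux/swap.h>		/* folio_add_lru() */

static struct folio *example_replace(struct folio *old)
{
	struct folio *new = filemap_alloc_folio(GFP_KERNEL, folio_order(old));

	if (!new)
		return NULL;

	__folio_set_locked(new);		/* both folios must be locked */
	replace_page_cache_folio(old, new);	/* moves the pagecache reference */
	folio_add_lru(new);			/* not done by the helper itself */
	folio_unlock(old);
	folio_put(old);				/* caller's reference to @old */
	return new;				/* still locked */
}
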
852 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
855 bool huge = folio_test_hugetlb(folio);
859 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
860 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
864 int error = mem_cgroup_charge(folio, NULL, gfp);
870 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
871 xas_set_order(&xas, index, folio_order(folio));
872 nr = folio_nr_pages(folio);
875 folio_ref_add(folio, nr);
876 folio->mapping = mapping;
877 folio->index = xas.xa_index;
883 if (order > folio_order(folio))
900 if (order > folio_order(folio)) {
908 xas_store(&xas, folio);
916 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
917 if (folio_test_pmd_mappable(folio))
918 __lruvec_stat_mod_folio(folio,
928 trace_mm_filemap_add_to_page_cache(folio);
932 mem_cgroup_uncharge(folio);
933 folio->mapping = NULL;
935 folio_put_refs(folio, nr);
940 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
946 __folio_set_locked(folio);
947 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
949 __folio_clear_locked(folio);
952 * The folio might have been evicted from cache only
954 * any other repeatedly accessed folio.
959 WARN_ON_ONCE(folio_test_active(folio));
961 workingset_refault(folio, shadow);
962 folio_add_lru(folio);
969 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
972 struct folio *folio;
979 folio = __folio_alloc_node(gfp, order, n);
980 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
982 return folio;
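
filemap_alloc_folio() and filemap_add_folio() above are normally used together, as filemap_create_folio() further down in this file does. A minimal sketch of that allocate-and-insert pattern; example_grab_new_folio() is a hypothetical name:

#include <linux/err.h>
#include <linux/pagemap.h>

static struct folio *example_grab_new_folio(struct address_space *mapping,
					    pgoff_t index)
{
	struct folio *folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);

	err = filemap_add_folio(mapping, folio, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (err) {
		folio_put(folio);	/* -EEXIST: someone beat us to it */
		return ERR_PTR(err);
	}
	return folio;			/* locked, on the LRU, refcount held */
}
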
1041 static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1043 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1106 if (test_bit(key->bit_nr, &key->folio->flags))
1109 if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1141 static void folio_wake_bit(struct folio *folio, int bit_nr)
1143 wait_queue_head_t *q = folio_waitqueue(folio);
1147 key.folio = folio;
1164 folio_clear_waiters(folio);
1185 * Attempt to check (or get) the folio flag, and mark us done
1188 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1192 if (test_and_set_bit(bit_nr, &folio->flags))
1194 } else if (test_bit(bit_nr, &folio->flags))
1204 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1207 wait_queue_head_t *q = folio_waitqueue(folio);
1216 !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1224 wait_page.folio = folio;
1250 folio_set_waiters(folio);
1251 if (!folio_trylock_flag(folio, bit_nr, wait))
1261 * We can drop our reference to the folio.
1264 folio_put(folio);
1301 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1310 * waiter from the wait-queues, but the folio waiters bit will remain
1366 struct folio *folio = pfn_swap_entry_folio(entry);
1368 q = folio_waitqueue(folio);
1369 if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1377 wait_page.folio = folio;
1382 folio_set_waiters(folio);
1383 if (!folio_trylock_flag(folio, PG_locked, wait))
1420 void folio_wait_bit(struct folio *folio, int bit_nr)
1422 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1426 int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1428 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1434 * @folio: The folio to wait for.
1437 * The caller should hold a reference on @folio. They expect the page to
1439 * (for example) by holding the reference while waiting for the folio to
1441 * dereference @folio.
1443 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1445 static int folio_put_wait_locked(struct folio *folio, int state)
1447 return folio_wait_bit_common(folio, PG_locked, state, DROP);
1451 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1452 * @folio: Folio defining the wait queue of interest
1455 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1457 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1459 wait_queue_head_t *q = folio_waitqueue(folio);
1464 folio_set_waiters(folio);
1470 * folio_unlock - Unlock a locked folio.
1471 * @folio: The folio.
1473 * Unlocks the folio and wakes up any thread sleeping on the page lock.
1478 void folio_unlock(struct folio *folio)
1483 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1484 if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
1485 folio_wake_bit(folio, PG_locked);
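
folio_unlock() pairs with folio_lock()/folio_trylock() and wakes any sleeper on PG_locked. A trivial sketch; example_still_in_cache() is hypothetical:

#include <linux/pagemap.h>

static bool example_still_in_cache(struct folio *folio,
				   struct address_space *mapping)
{
	bool ret;

	folio_lock(folio);		/* may sleep */
	ret = folio->mapping == mapping;
	folio_unlock(folio);		/* wakes waiters on PG_locked */
	return ret;
}
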
1490 * folio_end_read - End read on a folio.
1491 * @folio: The folio.
1494 * When all reads against a folio have completed, filesystems should
1496 * are outstanding. This will unlock the folio and wake up any thread
1497 * sleeping on the lock. The folio will also be marked uptodate if all
1503 void folio_end_read(struct folio *folio, bool success)
1509 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1510 VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);
1514 if (folio_xor_flags_has_waiters(folio, mask))
1515 folio_wake_bit(folio, PG_locked);
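
folio_end_read() above folds "mark uptodate if the read succeeded" and "unlock and wake" into one call, in place of a separate folio_mark_uptodate() followed by folio_unlock(). A hedged sketch of a read-completion hook; example_read_done() and its signature are hypothetical:

#include <linux/pagemap.h>

static void example_read_done(struct folio *folio, int err)
{
	/* @err == 0 means every byte of the folio was read successfully */
	folio_end_read(folio, err == 0);
}
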
1521 * @folio: The folio.
1523 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1524 * it. The folio reference held for PG_private_2 being set is released.
1526 * This is, for example, used when a netfs folio is being written to a local
1527 * disk cache, thereby allowing writes to the cache for the same folio to be
1530 void folio_end_private_2(struct folio *folio)
1532 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1533 clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1534 folio_wake_bit(folio, PG_private_2);
1535 folio_put(folio);
1540 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1541 * @folio: The folio to wait on.
1543 * Wait for PG_private_2 to be cleared on a folio.
1545 void folio_wait_private_2(struct folio *folio)
1547 while (folio_test_private_2(folio))
1548 folio_wait_bit(folio, PG_private_2);
1553 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1554 * @folio: The folio to wait on.
1556 * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is
1563 int folio_wait_private_2_killable(struct folio *folio)
1567 while (folio_test_private_2(folio)) {
1568 ret = folio_wait_bit_killable(folio, PG_private_2);
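
The PG_private_2 helpers above implement a small protocol: setting the bit takes an extra folio reference, folio_end_private_2() clears the bit, wakes waiters and drops that reference, and folio_wait_private_2() (or the _killable variant) blocks until the bit is gone. A hedged sketch of that pairing; the example_* functions are hypothetical:

#include <linux/pagemap.h>

static void example_start_cache_write(struct folio *folio)
{
	folio_get(folio);		/* reference held while the bit is set */
	folio_set_private_2(folio);
	/* ... start the asynchronous write to the local cache ... */
}

static void example_cache_write_done(struct folio *folio)
{
	folio_end_private_2(folio);	/* clear, wake, and put */
}

static void example_wait_for_cache_write(struct folio *folio)
{
	folio_wait_private_2(folio);
}
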
1578 * folio_end_writeback - End writeback against a folio.
1579 * @folio: The folio.
1581 * The folio must actually be under writeback.
1585 void folio_end_writeback(struct folio *folio)
1587 VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
1592 * to shuffle a folio marked for immediate reclaim is too mild
1594 * end of every folio writeback.
1596 if (folio_test_reclaim(folio)) {
1597 folio_clear_reclaim(folio);
1598 folio_rotate_reclaimable(folio);
1602 * Writeback does not hold a folio reference of its own, relying
1604 * But here we must make sure that the folio is not freed and
1607 folio_get(folio);
1608 if (__folio_end_writeback(folio))
1609 folio_wake_bit(folio, PG_writeback);
1610 acct_reclaim_writeback(folio);
1611 folio_put(folio);
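
folio_end_writeback() must only run on a folio that was put under writeback earlier (e.g. via folio_start_writeback()). A hedged completion sketch; example_writeback_done() is hypothetical:

#include <linux/pagemap.h>

static void example_writeback_done(struct folio *folio, int err)
{
	if (err)
		mapping_set_error(folio->mapping, err);
	folio_end_writeback(folio);	/* clears PG_writeback, wakes waiters */
}
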
1616 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1617 * @folio: The folio to lock
1619 void __folio_lock(struct folio *folio)
1621 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1626 int __folio_lock_killable(struct folio *folio)
1628 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1633 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1635 struct wait_queue_head *q = folio_waitqueue(folio);
1638 wait->folio = folio;
1643 folio_set_waiters(folio);
1644 ret = !folio_trylock(folio);
1661 * 0 - folio is locked.
1662 * non-zero - folio is not locked.
1668 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1670 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1684 folio_wait_locked_killable(folio);
1686 folio_wait_locked(folio);
1692 ret = __folio_lock_killable(folio);
1698 __folio_lock(folio);
1779 * 1. Load the folio from i_pages
1781 * 3. If the folio is not found by xas_reload(), put the refcount and retry
1801 * Looks up the page cache entry at @mapping & @index. If it is a folio,
1803 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1806 * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1811 struct folio *folio;
1816 folio = xas_load(&xas);
1817 if (xas_retry(&xas, folio))
1823 if (!folio || xa_is_value(folio))
1826 if (!folio_try_get_rcu(folio))
1829 if (unlikely(folio != xas_reload(&xas))) {
1830 folio_put(folio);
1836 return folio;
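
filemap_get_entry() can return NULL, a shadow/swap value entry, or a folio with an elevated refcount, as its comment above says. A small sketch of telling those apart; example_index_has_folio() is hypothetical:

#include <linux/pagemap.h>
#include <linux/xarray.h>

static bool example_index_has_folio(struct address_space *mapping, pgoff_t index)
{
	void *entry = filemap_get_entry(mapping, index);

	if (!entry || xa_is_value(entry))	/* empty, or shadow/swap entry */
		return false;

	folio_put((struct folio *)entry);	/* drop the reference we took */
	return true;
}
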
1840 * __filemap_get_folio - Find and get a reference to a folio.
1843 * @fgp_flags: %FGP flags modify how the folio is returned.
1851 * If this function returns a folio, it is returned with an increased refcount.
1853 * Return: The found folio or an ERR_PTR() otherwise.
1855 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1858 struct folio *folio;
1861 folio = filemap_get_entry(mapping, index);
1862 if (xa_is_value(folio))
1863 folio = NULL;
1864 if (!folio)
1869 if (!folio_trylock(folio)) {
1870 folio_put(folio);
1874 folio_lock(folio);
1878 if (unlikely(folio->mapping != mapping)) {
1879 folio_unlock(folio);
1880 folio_put(folio);
1883 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1887 folio_mark_accessed(folio);
1890 if (folio_test_idle(folio))
1891 folio_clear_idle(folio);
1895 folio_wait_stable(folio);
1897 if (!folio && (fgp_flags & FGP_CREAT)) {
1916 /* If we're not aligned, allocate a smaller folio */
1926 folio = filemap_alloc_folio(alloc_gfp, order);
1927 if (!folio)
1932 __folio_set_referenced(folio);
1934 err = filemap_add_folio(mapping, folio, index, gfp);
1937 folio_put(folio);
1938 folio = NULL;
1949 if (folio && (fgp_flags & FGP_FOR_MMAP))
1950 folio_unlock(folio);
1953 if (!folio)
1955 return folio;
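
A typical __filemap_get_folio() call per the doc above, using FGP_LOCK | FGP_CREAT to find or create a locked folio. Sketch only; example_grab_locked() is hypothetical:

#include <linux/err.h>
#include <linux/pagemap.h>

static struct folio *example_grab_locked(struct address_space *mapping,
					 pgoff_t index)
{
	struct folio *folio = __filemap_get_folio(mapping, index,
						  FGP_LOCK | FGP_CREAT,
						  mapping_gfp_mask(mapping));

	if (IS_ERR(folio))		/* e.g. -ENOMEM; never NULL */
		return folio;
	/*
	 * Locked, refcount elevated: the caller must folio_unlock()
	 * and folio_put() when done with it.
	 */
	return folio;
}
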
1959 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1962 struct folio *folio;
1966 folio = xas_find(xas, max);
1968 folio = xas_find_marked(xas, max, mark);
1970 if (xas_retry(xas, folio))
1977 if (!folio || xa_is_value(folio))
1978 return folio;
1980 if (!folio_try_get_rcu(folio))
1983 if (unlikely(folio != xas_reload(xas))) {
1984 folio_put(folio);
1988 return folio;
2018 struct folio *folio;
2021 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2023 if (!folio_batch_add(fbatch, folio))
2032 folio = fbatch->folios[idx];
2033 if (!xa_is_value(folio))
2034 nr = folio_nr_pages(folio);
2064 struct folio *folio;
2067 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2068 if (!xa_is_value(folio)) {
2069 if (folio->index < *start)
2071 if (folio_next_index(folio) - 1 > end)
2073 if (!folio_trylock(folio))
2075 if (folio->mapping != mapping ||
2076 folio_test_writeback(folio))
2078 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2079 folio);
2082 if (!folio_batch_add(fbatch, folio))
2086 folio_unlock(folio);
2088 folio_put(folio);
2096 folio = fbatch->folios[idx];
2097 if (!xa_is_value(folio))
2098 nr = folio_nr_pages(folio);
2116 * We also update @start to index the next folio for the traversal.
2137 * Also update @start to be positioned for traversal of the next folio.
2145 struct folio *folio;
2149 for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2150 folio = xas_next(&xas)) {
2151 if (xas_retry(&xas, folio))
2157 if (xa_is_value(folio))
2160 if (!folio_try_get_rcu(folio))
2163 if (unlikely(folio != xas_reload(&xas)))
2166 if (!folio_batch_add(fbatch, folio)) {
2167 nr = folio_nr_pages(folio);
2168 *start = folio->index + nr;
2173 folio_put(folio);
2183 folio = fbatch->folios[nr - 1];
2184 *start = folio_next_index(folio);
2200 * The first folio may start before @start; if it does, it will contain
2201 * @start. The final folio may extend beyond @end; if it does, it will
2203 * between the folios if there are indices which have no folio in the
2209 * Also update @start to index the next folio for traversal.
2215 struct folio *folio;
2218 while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2224 if (xa_is_value(folio))
2226 if (!folio_batch_add(fbatch, folio)) {
2227 unsigned long nr = folio_nr_pages(folio);
2228 *start = folio->index + nr;
2274 * the middle of a folio, the entire folio will be returned. The last
2275 * folio in the batch may have the readahead flag set or the uptodate flag
2282 struct folio *folio;
2285 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2286 if (xas_retry(&xas, folio))
2288 if (xas.xa_index > max || xa_is_value(folio))
2290 if (xa_is_sibling(folio))
2292 if (!folio_try_get_rcu(folio))
2295 if (unlikely(folio != xas_reload(&xas)))
2298 if (!folio_batch_add(fbatch, folio))
2300 if (!folio_test_uptodate(folio))
2302 if (folio_test_readahead(folio))
2304 xas_advance(&xas, folio_next_index(folio) - 1);
2307 folio_put(folio);
2315 struct folio *folio)
2317 bool workingset = folio_test_workingset(folio);
2326 folio_clear_error(folio);
2331 error = filler(file, folio);
2337 error = folio_wait_locked_killable(folio);
2340 if (folio_test_uptodate(folio))
2348 loff_t pos, size_t count, struct folio *folio,
2351 if (folio_test_uptodate(folio))
2358 if (mapping->host->i_blkbits >= folio_shift(folio))
2361 if (folio_pos(folio) > pos) {
2362 count -= folio_pos(folio) - pos;
2365 pos -= folio_pos(folio);
2368 return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2373 struct folio *folio, bool need_uptodate)
2384 if (!folio_trylock(folio)) {
2394 folio_put_wait_locked(folio, TASK_KILLABLE);
2397 error = __folio_lock_async(folio, iocb->ki_waitq);
2403 if (!folio->mapping)
2407 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
2416 folio);
2419 folio_unlock(folio);
2423 folio_put(folio);
2431 struct folio *folio;
2434 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2435 if (!folio)
2443 * release invalidate_lock after inserting the folio into
2444 * the page cache as the locked folio would then be enough to
2452 error = filemap_add_folio(mapping, folio, index,
2459 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2464 folio_batch_add(fbatch, folio);
2468 folio_put(folio);
2473 struct address_space *mapping, struct folio *folio,
2476 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2480 page_cache_async_ra(&ractl, folio, last_index - folio->index);
2492 struct folio *folio;
2519 folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2520 if (folio_test_readahead(folio)) {
2521 err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2525 if (!folio_test_uptodate(folio)) {
2529 err = filemap_update_page(iocb, mapping, count, folio,
2538 folio_put(folio);
2546 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2548 unsigned int shift = folio_shift(folio);
2625 * When a read accesses the same folio several times, only
2633 struct folio *folio = fbatch.folios[i];
2634 size_t fsize = folio_size(folio);
2640 if (end_offset < folio_pos(folio))
2643 folio_mark_accessed(folio);
2645 * If users can be writing to this folio using arbitrary
2647 * before reading the folio on the kernel side.
2650 flush_dcache_folio(folio);
2652 copied = copy_folio_to_iter(folio, offset, bytes, iter);
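
The aliasing comment above is why filemap_read() flushes the dcache before copying folio contents out to the iterator. A condensed sketch of that copy-out step; example_copy_out() is hypothetical:

#include <linux/cacheflush.h>
#include <linux/pagemap.h>
#include <linux/swap.h>		/* folio_mark_accessed() */
#include <linux/uio.h>

static size_t example_copy_out(struct folio *folio, loff_t pos, size_t bytes,
			       struct iov_iter *iter)
{
	size_t offset = offset_in_folio(folio, pos);

	bytes = min(bytes, folio_size(folio) - offset);
	folio_mark_accessed(folio);
	flush_dcache_folio(folio);	/* guard against dcache aliasing */
	return copy_folio_to_iter(folio, offset, bytes, iter);
}
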
2787 * Splice subpages from a folio into a pipe.
2790 struct folio *folio, loff_t fpos, size_t size)
2793 size_t spliced = 0, offset = offset_in_folio(folio, fpos);
2795 page = folio_page(folio, offset / PAGE_SIZE);
2796 size = min(size, folio_size(folio) - offset);
2810 folio_get(folio);
2894 struct folio *folio = fbatch.folios[i];
2897 if (folio_pos(folio) >= end_offset)
2899 folio_mark_accessed(folio);
2902 * If users can be writing to this folio using arbitrary
2904 * before reading the folio on the kernel side.
2907 flush_dcache_folio(folio);
2910 n = splice_folio_into_pipe(pipe, folio, *ppos, n);
2933 struct address_space *mapping, struct folio *folio,
2939 if (xa_is_value(folio) || folio_test_uptodate(folio))
2946 folio_lock(folio);
2947 if (unlikely(folio->mapping != mapping))
2950 offset = offset_in_folio(folio, start) & ~(bsz - 1);
2953 if (ops->is_partially_uptodate(folio, offset, bsz) ==
2958 } while (offset < folio_size(folio));
2960 folio_unlock(folio);
2965 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
2967 if (xa_is_value(folio))
2969 return folio_size(folio);
2996 struct folio *folio;
3002 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3012 seek_size = seek_folio_size(&xas, folio);
3014 start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3022 if (!xa_is_value(folio))
3023 folio_put(folio);
3029 if (folio && !xa_is_value(folio))
3030 folio_put(folio);
3041 * @folio - the folio to lock.
3045 * mmap_lock. It differs in that it actually returns the folio locked
3046 * if it returns 1 and 0 if it couldn't lock the folio. If we did have
3050 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3053 if (folio_trylock(folio))
3066 if (__folio_lock_killable(folio)) {
3079 __folio_lock(folio);
3161 struct folio *folio)
3177 if (folio_test_readahead(folio)) {
3179 page_cache_async_ra(&ractl, folio, ra->ra_pages);
3191 * We might have COW'ed a pagecache folio and might now have an mlocked
3192 * anon folio mapped. The original pagecache folio is not mlocked and
3257 struct folio *folio;
3268 folio = filemap_get_folio(mapping, index);
3269 if (likely(!IS_ERR(folio))) {
3275 fpin = do_async_mmap_readahead(vmf, folio);
3276 if (unlikely(!folio_test_uptodate(folio))) {
3299 folio = __filemap_get_folio(mapping, index,
3302 if (IS_ERR(folio)) {
3310 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3314 if (unlikely(folio->mapping != mapping)) {
3315 folio_unlock(folio);
3316 folio_put(folio);
3319 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3322 * We have a locked folio in the page cache, now we need to check
3326 if (unlikely(!folio_test_uptodate(folio))) {
3328 * If the invalidate lock is not held, the folio was in cache
3334 folio_unlock(folio);
3335 folio_put(folio);
3340 * OK, the folio is really not uptodate. This can be because the
3353 folio_unlock(folio);
3365 folio_unlock(folio);
3366 folio_put(folio);
3370 vmf->page = folio_file_page(folio, index);
3381 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3384 folio_put(folio);
3398 if (!IS_ERR(folio))
3399 folio_put(folio);
3408 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3415 folio_unlock(folio);
3416 folio_put(folio);
3420 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3421 struct page *page = folio_file_page(folio, start);
3425 folio_unlock(folio);
3436 static struct folio *next_uptodate_folio(struct xa_state *xas,
3439 struct folio *folio = xas_next_entry(xas, end_pgoff);
3443 if (!folio)
3445 if (xas_retry(xas, folio))
3447 if (xa_is_value(folio))
3449 if (folio_test_locked(folio))
3451 if (!folio_try_get_rcu(folio))
3454 if (unlikely(folio != xas_reload(xas)))
3456 if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3458 if (!folio_trylock(folio))
3460 if (folio->mapping != mapping)
3462 if (!folio_test_uptodate(folio))
3467 return folio;
3469 folio_unlock(folio);
3471 folio_put(folio);
3472 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3478 * Map page range [start_page, start_page + nr_pages) of folio.
3479 * start_page is gotten from start by folio_page(folio, start)
3482 struct folio *folio, unsigned long start,
3487 struct page *page = folio_page(folio, start);
3509 set_pte_range(vmf, folio, page, count, addr);
3510 folio_ref_add(folio, count);
3523 set_pte_range(vmf, folio, page, count, addr);
3524 folio_ref_add(folio, count);
3535 struct folio *folio, unsigned long addr,
3539 struct page *page = &folio->page;
3557 set_pte_range(vmf, folio, page, 1, addr);
3558 folio_ref_inc(folio);
3572 struct folio *folio;
3577 folio = next_uptodate_folio(&xas, mapping, end_pgoff);
3578 if (!folio)
3581 if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3589 folio_unlock(folio);
3590 folio_put(folio);
3599 end = folio_next_index(folio) - 1;
3602 if (!folio_test_large(folio))
3604 folio, addr, &mmap_miss);
3606 ret |= filemap_map_folio_range(vmf, folio,
3607 xas.xa_index - folio->index, addr,
3610 folio_unlock(folio);
3611 folio_put(folio);
3612 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
3630 struct folio *folio = page_folio(vmf->page);
3635 folio_lock(folio);
3636 if (folio->mapping != mapping) {
3637 folio_unlock(folio);
3642 * We mark the folio dirty already here so that when freeze is in
3644 * see the dirty folio and writeprotect it again.
3646 folio_mark_dirty(folio);
3647 folio_wait_stable(folio);
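
The filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() matches above form the generic file mmap path; a filesystem content with that behaviour can point its vm_ops at them, much as generic_file_mmap() does. Sketch only; the example_* names are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &example_file_vm_ops;
	return 0;
}
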
3700 static struct folio *do_read_cache_folio(struct address_space *mapping,
3703 struct folio *folio;
3709 folio = filemap_get_folio(mapping, index);
3710 if (IS_ERR(folio)) {
3711 folio = filemap_alloc_folio(gfp, 0);
3712 if (!folio)
3714 err = filemap_add_folio(mapping, folio, index, gfp);
3716 folio_put(folio);
3725 if (folio_test_uptodate(folio))
3728 if (!folio_trylock(folio)) {
3729 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3734 if (!folio->mapping) {
3735 folio_unlock(folio);
3736 folio_put(folio);
3741 if (folio_test_uptodate(folio)) {
3742 folio_unlock(folio);
3747 err = filemap_read_folio(file, filler, folio);
3749 folio_put(folio);
3756 folio_mark_accessed(folio);
3757 return folio;
3767 * Read one page into the page cache. If it succeeds, the folio returned
3768 * will contain @index, but it may not be the first page of the folio.
3774 * Return: An uptodate folio on success, ERR_PTR() on failure.
3776 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3786 * @mapping: The address_space for the folio.
3787 * @index: The index that the allocated folio will contain.
3799 * Return: Uptodate folio on success, ERR_PTR() on failure.
3801 struct folio *mapping_read_folio_gfp(struct address_space *mapping,
3811 struct folio *folio;
3813 folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3814 if (IS_ERR(folio))
3815 return &folio->page;
3816 return folio_file_page(folio, index);
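
read_cache_folio() above returns an uptodate folio (which contains @index but may span more than one page) or an ERR_PTR(). A hedged sketch that reads and inspects one byte; example_peek_byte() is hypothetical:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static int example_peek_byte(struct address_space *mapping, pgoff_t index,
			     u8 *out)
{
	struct folio *folio = read_cache_folio(mapping, index, NULL, NULL);
	u8 *kaddr;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* map the page within the folio that actually holds @index */
	kaddr = kmap_local_folio(folio,
			offset_in_folio(folio, (loff_t)index << PAGE_SHIFT));
	*out = *kaddr;
	kunmap_local(kaddr);

	folio_put(folio);
	return 0;
}
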
4105 * filemap_release_folio() - Release fs-specific metadata on a folio.
4106 * @folio: The folio which the kernel is trying to free.
4109 * The address_space is trying to release any data attached to a folio
4110 * (presumably at folio->private).
4113 * indicating that the folio has other metadata associated with it.
4121 bool filemap_release_folio(struct folio *folio, gfp_t gfp)
4123 struct address_space * const mapping = folio->mapping;
4125 BUG_ON(!folio_test_locked(folio));
4126 if (!folio_needs_release(folio))
4128 if (folio_test_writeback(folio))
4132 return mapping->a_ops->release_folio(folio, gfp);
4133 return try_to_free_buffers(folio);
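
filemap_release_folio() above is what reclaim, truncation and migration use to ask the filesystem to drop data hung off folio->private; it returns false when the data cannot be released (for example while the folio is under writeback). A thin caller sketch; example_try_strip_private() is hypothetical:

#include <linux/pagemap.h>

static bool example_try_strip_private(struct folio *folio)
{
	/* the caller must hold the folio lock, per the BUG_ON() above */
	return filemap_release_folio(folio, GFP_KERNEL);
}
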
4208 struct folio *folio;
4211 xas_for_each(&xas, folio, last_index) {
4217 * Don't deref the folio. It is not pinned, and might
4227 if (xas_retry(&xas, folio))
4242 if (xa_is_value(folio)) {
4244 void *shadow = (void *)folio;
4252 swp_entry_t swp = radix_to_swp_entry(folio);