Lines Matching defs:folio in mm/shmem.c

150 			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
625 struct folio *folio;
678 folio = filemap_get_folio(inode->i_mapping, index);
679 if (IS_ERR(folio))
683 if (!folio_test_large(folio)) {
684 folio_put(folio);
695 if (!folio_trylock(folio)) {
696 folio_put(folio);
700 ret = split_folio(folio);
701 folio_unlock(folio);
702 folio_put(folio);
761 static int shmem_add_to_page_cache(struct folio *folio,
765 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
766 long nr = folio_nr_pages(folio);
768 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
769 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
770 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
771 VM_BUG_ON(expected && folio_test_large(folio));
773 folio_ref_add(folio, nr);
774 folio->mapping = mapping;
775 folio->index = index;
778 folio_throttle_swaprate(folio, gfp);
790 xas_store(&xas, folio);
793 if (folio_test_pmd_mappable(folio))
794 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
795 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
796 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
803 folio->mapping = NULL;
804 folio_ref_sub(folio, nr);
812 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
814 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
816 struct address_space *mapping = folio->mapping;
817 long nr = folio_nr_pages(folio);
821 error = shmem_replace_entry(mapping, folio->index, folio, radswap);
822 folio->mapping = NULL;
824 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
825 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
827 folio_put(folio);
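
The comment at 812 says this helper "substitutes swap for @folio": the folio's slot in the mapping is left holding the swap entry, encoded as an xarray value. A minimal sketch of that encoding, assuming only the real swp_to_radix_entry()/radix_to_swp_entry() helpers from <linux/swapops.h>; the wrapper function itself is hypothetical.

#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/xarray.h>

/* Hypothetical helper: does this page-cache slot hold a swap entry? */
static bool slot_is_swap(void *entry, swp_entry_t *swapp)
{
	if (!entry || !xa_is_value(entry))
		return false;			/* empty slot, or a real folio */
	*swapp = radix_to_swp_entry(entry);	/* inverse of swp_to_radix_entry() */
	return true;
}
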
932 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
934 struct folio *folio;
940 folio = filemap_get_entry(inode->i_mapping, index);
941 if (!folio)
942 return folio;
943 if (!xa_is_value(folio)) {
944 folio_lock(folio);
945 if (folio->mapping == inode->i_mapping)
946 return folio;
947 /* The folio has been swapped out */
948 folio_unlock(folio);
949 folio_put(folio);
952 * But read a folio back from swap if any of it is within i_size
955 folio = NULL;
956 shmem_get_folio(inode, index, &folio, SGP_READ);
957 return folio;
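
shmem_get_partial_folio() above shows the lookup-and-validate idiom behind the comments at 947 and 952: filemap_get_entry() may return a real folio, a swap value entry, or NULL, and a real folio must be locked and re-checked against the mapping before it is trusted. A hedged sketch of just that idiom (the helper name is made up; the swap-readback fallback via SGP_READ is omitted):

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical: return a locked, referenced folio still attached to @mapping. */
static struct folio *get_attached_folio(struct address_space *mapping,
					pgoff_t index)
{
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio || xa_is_value(folio))
		return NULL;		/* hole, or contents are out in swap */
	folio_lock(folio);
	if (folio->mapping == mapping)
		return folio;
	/* Truncated or reclaimed while we looked it up: drop it, report a miss. */
	folio_unlock(folio);
	folio_put(folio);
	return NULL;
}
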
973 struct folio *folio;
990 folio = fbatch.folios[i];
992 if (xa_is_value(folio)) {
996 indices[i], folio);
1000 if (!unfalloc || !folio_test_uptodate(folio))
1001 truncate_inode_folio(mapping, folio);
1002 folio_unlock(folio);
1010 * When undoing a failed fallocate, we want none of the partial folio
1012 * folio when !uptodate indicates that it was added by this fallocate,
1013 * even when [lstart, lend] covers only a part of the folio.
1019 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1020 if (folio) {
1021 same_folio = lend < folio_pos(folio) + folio_size(folio);
1022 folio_mark_dirty(folio);
1023 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1024 start = folio_next_index(folio);
1026 end = folio->index;
1028 folio_unlock(folio);
1029 folio_put(folio);
1030 folio = NULL;
1034 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1035 if (folio) {
1036 folio_mark_dirty(folio);
1037 if (!truncate_inode_partial_folio(folio, lstart, lend))
1038 end = folio->index;
1039 folio_unlock(folio);
1040 folio_put(folio);
1059 folio = fbatch.folios[i];
1061 if (xa_is_value(folio)) {
1064 if (shmem_free_swap(mapping, indices[i], folio)) {
1073 folio_lock(folio);
1075 if (!unfalloc || !folio_test_uptodate(folio)) {
1076 if (folio_mapping(folio) != mapping) {
1078 folio_unlock(folio);
1082 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1083 folio);
1085 if (!folio_test_large(folio)) {
1086 truncate_inode_folio(mapping, folio);
1087 } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1096 if (!folio_test_large(folio)) {
1097 folio_unlock(folio);
1103 folio_unlock(folio);
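
The comments at 1010-1013 reason about folios that straddle the ends of the truncation range, and 1021 tests whether both ends land in the same (possibly large) folio. A small illustration of that byte arithmetic, assuming only the real folio_pos()/folio_size() helpers; the wrapper is hypothetical.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical: do bytes lstart..lend (inclusive) both fall inside @folio? */
static bool range_in_one_folio(struct folio *folio, loff_t lstart, loff_t lend)
{
	loff_t first = folio_pos(folio);		/* first byte the folio covers */
	loff_t last = first + folio_size(folio) - 1;	/* last byte it covers */

	return lstart >= first && lend <= last;
}
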
1278 struct folio *folio;
1282 xas_for_each(&xas, folio, ULONG_MAX) {
1283 if (xas_retry(&xas, folio))
1286 if (!xa_is_value(folio))
1289 entry = radix_to_swp_entry(folio);
1298 if (!folio_batch_add(fbatch, folio))
1324 struct folio *folio = fbatch->folios[i];
1326 if (!xa_is_value(folio))
1328 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1331 folio_unlock(folio);
1332 folio_put(folio);
1421 struct folio *folio = page_folio(page);
1422 struct address_space *mapping = folio->mapping;
1450 if (folio_test_large(folio)) {
1452 folio_test_set_dirty(folio);
1455 folio = page_folio(page);
1456 folio_clear_dirty(folio);
1459 index = folio->index;
1464 * fallocated folio arriving here is now to initialize it and write it.
1466 * That's okay for a folio already fallocated earlier, but if we have
1468 * of this folio in case we have to undo it, and (b) it may not be a
1470 * reactivate the folio, and let shmem_fallocate() quit when too many.
1472 if (!folio_test_uptodate(folio)) {
1488 folio_zero_range(folio, 0, folio_size(folio));
1489 flush_dcache_folio(folio);
1490 folio_mark_uptodate(folio);
1493 swap = folio_alloc_swap(folio);
1499 * if it's not already there. Do it now before the folio is
1509 if (add_to_swap_cache(folio, swap,
1514 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1517 BUG_ON(folio_mapped(folio));
1518 return swap_writepage(&folio->page, wbc);
1522 put_swap_folio(folio, swap);
1524 folio_mark_dirty(folio);
1526 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1527 folio_unlock(folio);
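
The comment at 1464-1470 explains why a fallocated-but-never-written folio must be given defined contents before it can go to swap, and 1488-1490 show the sequence used for that. A minimal sketch of the same idiom (the helper name is illustrative, not from shmem.c):

#include <linux/cacheflush.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical: give a never-written folio defined (zero) contents. */
static void zero_fill_folio(struct folio *folio)
{
	folio_zero_range(folio, 0, folio_size(folio));	/* zero every byte */
	flush_dcache_folio(folio);			/* keep the data cache coherent */
	folio_mark_uptodate(folio);			/* only now is it safe to read back */
}
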
1568 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1573 struct folio *folio;
1576 folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1579 return folio;
1606 static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1620 static struct folio *shmem_alloc_folio(gfp_t gfp,
1631 return (struct folio *)page;
1634 static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
1640 struct folio *folio;
1663 folio = shmem_alloc_hugefolio(gfp, info, index);
1664 if (!folio)
1668 folio = shmem_alloc_folio(gfp, info, index);
1670 if (!folio)
1673 __folio_set_locked(folio);
1674 __folio_set_swapbacked(folio);
1677 error = mem_cgroup_charge(folio, fault_mm, gfp);
1689 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1704 * except our folio is there in cache, so not quite balanced.
1716 filemap_remove_folio(folio);
1722 folio_add_lru(folio);
1723 return folio;
1726 folio_unlock(folio);
1727 folio_put(folio);
1743 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1745 return folio_zonenum(folio) > gfp_zone(gfp);
1748 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1751 struct folio *old, *new;
1818 struct folio *folio, swp_entry_t swap)
1831 folio_wait_writeback(folio);
1832 delete_from_swap_cache(folio);
1834 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
1843 * Swap in the folio pointed to by *foliop.
1844 * Caller has to make sure that *foliop contains a valid swapped folio.
1845 * Returns 0 and the folio in foliop if success. On failure, returns the
1849 struct folio **foliop, enum sgp_type sgp,
1856 struct folio *folio = NULL;
1876 folio = swap_cache_get_folio(swap, NULL, 0);
1877 if (!folio) {
1885 folio = shmem_swapin_cluster(swap, gfp, info, index);
1886 if (!folio) {
1892 /* We have to do this with folio locked to prevent races */
1893 folio_lock(folio);
1894 if (!folio_test_swapcache(folio) ||
1895 folio->swap.val != swap.val ||
1900 if (!folio_test_uptodate(folio)) {
1904 folio_wait_writeback(folio);
1908 * folio after reading from swap.
1910 arch_swap_restore(swap, folio);
1912 if (shmem_should_replace_folio(folio, gfp)) {
1913 error = shmem_replace_folio(&folio, gfp, info, index);
1918 error = shmem_add_to_page_cache(folio, mapping, index,
1926 folio_mark_accessed(folio);
1928 delete_from_swap_cache(folio);
1929 folio_mark_dirty(folio);
1933 *foliop = folio;
1939 shmem_set_folio_swapin_error(inode, index, folio, swap);
1941 if (folio) {
1942 folio_unlock(folio);
1943 folio_put(folio);
1960 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1965 struct folio *folio;
1982 folio = filemap_get_entry(inode->i_mapping, index);
1983 if (folio && vma && userfaultfd_minor(vma)) {
1984 if (!xa_is_value(folio))
1985 folio_put(folio);
1990 if (xa_is_value(folio)) {
1991 error = shmem_swapin_folio(inode, index, &folio,
1996 *foliop = folio;
2000 if (folio) {
2001 folio_lock(folio);
2003 /* Has the folio been truncated or swapped out? */
2004 if (unlikely(folio->mapping != inode->i_mapping)) {
2005 folio_unlock(folio);
2006 folio_put(folio);
2010 folio_mark_accessed(folio);
2011 if (folio_test_uptodate(folio))
2013 /* fallocated folio */
2016 folio_unlock(folio);
2017 folio_put(folio);
2021 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2022 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2045 folio = shmem_alloc_and_add_folio(huge_gfp,
2047 if (!IS_ERR(folio)) {
2051 if (PTR_ERR(folio) == -EEXIST)
2055 folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false);
2056 if (IS_ERR(folio)) {
2057 error = PTR_ERR(folio);
2060 folio = NULL;
2066 if (folio_test_pmd_mappable(folio) &&
2068 folio_next_index(folio) - 1) {
2072 * Part of the large folio is beyond i_size: subject
2089 folio_set_referenced(folio);
2091 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2097 * Let SGP_WRITE caller clear ends if write does not fill folio;
2098 * but SGP_FALLOC on a folio fallocated earlier must initialize
2101 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2102 long i, n = folio_nr_pages(folio);
2105 clear_highpage(folio_page(folio, i));
2106 flush_dcache_folio(folio);
2107 folio_mark_uptodate(folio);
2117 *foliop = folio;
2125 filemap_remove_folio(folio);
2127 if (folio) {
2128 folio_unlock(folio);
2129 folio_put(folio);
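
The comments at 2021-2022 and 2097-2099 spell out the sgp_type conventions: SGP_READ succeeds on a hole but returns a NULL folio for the caller to treat as zeroes, while SGP_WRITE leaves clearing of unwritten ends to the caller. A hedged caller-side sketch of the SGP_READ convention; the helper below is made up and not part of shmem.c.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

/* Hypothetical: read one byte of a shmem inode, treating holes as zero. */
static int shmem_peek_byte(struct inode *inode, pgoff_t index, u8 *out)
{
	struct folio *folio = NULL;
	void *kaddr;
	int error;

	error = shmem_get_folio(inode, index, &folio, SGP_READ);
	if (error)
		return error;
	if (!folio) {			/* hole: never written, reads as zero */
		*out = 0;
		return 0;
	}
	folio_unlock(folio);		/* returned locked; we only read it */
	kaddr = kmap_local_folio(folio,
				 offset_in_folio(folio, (loff_t)index << PAGE_SHIFT));
	*out = *(u8 *)kaddr;
	kunmap_local(kaddr);
	folio_mark_accessed(folio);
	folio_put(folio);
	return 0;
}
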
2135 * shmem_get_folio - find, and lock a shmem folio.
2138 * @foliop: pointer to the folio if found
2141 * Looks up the page cache entry at @inode & @index. If a folio is
2144 * If the caller modifies data in the folio, it must call folio_mark_dirty()
2145 * before unlocking the folio to ensure that the folio is not reclaimed.
2148 * When no folio is found, the behavior depends on @sgp:
2151 * - for all other flags a new folio is allocated, inserted into the
2157 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
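
A minimal write-side usage sketch, assuming a shmem/tmpfs inode; it mirrors the shmem_symlink() pattern further down: SGP_WRITE hands back a locked, referenced folio, and the caller fills it, marks it uptodate and dirty, then unlocks and puts it. The helper name is made up, and i_size handling is omitted.

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/string.h>

/* Hypothetical: store a small blob in the first folio of a shmem inode. */
static int shmem_store_blob(struct inode *inode, const void *buf, size_t len)
{
	struct folio *folio;
	void *kaddr;
	int error;

	if (len > PAGE_SIZE)
		return -EINVAL;

	error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
	if (error)
		return error;

	kaddr = kmap_local_folio(folio, 0);
	memcpy(kaddr, buf, len);
	kunmap_local(kaddr);
	if (len < folio_size(folio))		/* SGP_WRITE leaves clearing the rest to us */
		folio_zero_range(folio, len, folio_size(folio) - len);

	folio_mark_uptodate(folio);
	folio_mark_dirty(folio);	/* must precede unlock, per the comment above */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
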
2240 struct folio *folio = NULL;
2255 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2259 if (folio) {
2260 vmf->page = folio_file_page(folio, vmf->pgoff);
2610 struct folio **foliop)
2618 struct folio *folio;
2637 folio = shmem_alloc_folio(gfp, info, pgoff);
2638 if (!folio)
2642 page_kaddr = kmap_local_folio(folio, 0);
2667 *foliop = folio;
2673 flush_dcache_folio(folio);
2675 clear_user_highpage(&folio->page, dst_addr);
2678 folio = *foliop;
2679 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2683 VM_BUG_ON(folio_test_locked(folio));
2684 VM_BUG_ON(folio_test_swapbacked(folio));
2685 __folio_set_locked(folio);
2686 __folio_set_swapbacked(folio);
2687 __folio_mark_uptodate(folio);
2694 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
2697 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
2702 &folio->page, true, flags);
2707 folio_unlock(folio);
2710 filemap_remove_folio(folio);
2712 folio_unlock(folio);
2713 folio_put(folio);
2732 struct folio *folio;
2744 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2748 *pagep = folio_file_page(folio, index);
2750 folio_unlock(folio);
2751 folio_put(folio);
2764 struct folio *folio = page_folio(page);
2770 if (!folio_test_uptodate(folio)) {
2771 if (copied < folio_size(folio)) {
2772 size_t from = offset_in_folio(folio, pos);
2773 folio_zero_segments(folio, 0, from,
2774 from + copied, folio_size(folio));
2776 folio_mark_uptodate(folio);
2778 folio_mark_dirty(folio);
2779 folio_unlock(folio);
2780 folio_put(folio);
2800 struct folio *folio = NULL;
2815 error = shmem_get_folio(inode, index, &folio, SGP_READ);
2821 if (folio) {
2822 folio_unlock(folio);
2824 page = folio_file_page(folio, index);
2826 folio_put(folio);
2842 if (folio)
2843 folio_put(folio);
2849 if (folio) {
2861 folio_mark_accessed(folio);
2867 folio_put(folio);
2977 struct folio *folio = NULL;
2991 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
2998 if (folio) {
2999 folio_unlock(folio);
3001 if (folio_test_hwpoison(folio) ||
3002 (folio_test_large(folio) &&
3003 folio_test_has_hwpoisoned(folio))) {
3022 if (folio) {
3029 flush_dcache_folio(folio);
3030 folio_mark_accessed(folio);
3035 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3036 folio_put(folio);
3037 folio = NULL;
3054 if (folio)
3055 folio_put(folio);
3167 struct folio *folio;
3178 error = shmem_get_folio(inode, index, &folio,
3193 * a second SGP_FALLOC on the same large folio will clear it,
3196 index = folio_next_index(folio);
3205 if (!folio_test_uptodate(folio))
3216 folio_mark_dirty(folio);
3217 folio_unlock(folio);
3218 folio_put(folio);
3499 struct folio *folio;
3530 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3534 memcpy(folio_address(folio), symname, len);
3535 folio_mark_uptodate(folio);
3536 folio_mark_dirty(folio);
3537 folio_unlock(folio);
3538 folio_put(folio);
3563 struct folio *folio = NULL;
3567 folio = filemap_get_folio(inode->i_mapping, 0);
3568 if (IS_ERR(folio))
3570 if (PageHWPoison(folio_page(folio, 0)) ||
3571 !folio_test_uptodate(folio)) {
3572 folio_put(folio);
3576 error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3579 if (!folio)
3581 if (PageHWPoison(folio_page(folio, 0))) {
3582 folio_unlock(folio);
3583 folio_put(folio);
3586 folio_unlock(folio);
3588 set_delayed_call(done, shmem_put_link, folio);
3589 return folio_address(folio);
4510 struct folio *folio)
4938 * @mapping: the folio's address_space
4939 * @index: the folio index
4951 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
4956 struct folio *folio;
4959 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4964 folio_unlock(folio);
4965 return folio;
4978 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
4981 if (IS_ERR(folio))
4982 return &folio->page;
4984 page = folio_file_page(folio, index);
4986 folio_put(folio);
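
Unlike shmem_get_folio(), the shmem_read_folio_gfp() documented at 4938-4939 returns the folio unlocked (see 4964) and is meant for callers outside shmem.c; errors come back as ERR_PTR values, as the page wrapper at 4981 shows. A hedged usage sketch; the caller function is made up.

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/string.h>

/* Hypothetical: copy out the start of folio @index of a shmem mapping. */
static int shmem_copy_folio_start(struct address_space *mapping, pgoff_t index,
				  void *buf, size_t len)
{
	struct folio *folio;
	void *kaddr;

	folio = shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);		/* e.g. -ENOMEM or -EIO */

	/* The folio comes back unlocked; only its first page is mapped here. */
	kaddr = kmap_local_folio(folio, 0);
	memcpy(buf, kaddr, min_t(size_t, len, PAGE_SIZE));
	kunmap_local(kaddr);

	folio_put(folio);			/* drop the reference we were handed */
	return 0;
}
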