Lines Matching defs:folio

88 int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
93 XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
94 unsigned long i, nr = folio_nr_pages(folio);
99 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
100 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
101 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
103 folio_ref_add(folio, nr);
104 folio_set_swapcache(folio);
105 folio->swap = entry;
113 VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
119 xas_store(&xas, folio);
123 __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
124 __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
132 folio_clear_swapcache(folio);
133 folio_ref_sub(folio, nr);
141 void __delete_from_swap_cache(struct folio *folio,
146 long nr = folio_nr_pages(folio);
152 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
153 VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
154 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
158 VM_BUG_ON_PAGE(entry != folio, entry);
161 folio->swap.val = 0;
162 folio_clear_swapcache(folio);
164 __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
165 __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
169 * add_to_swap - allocate swap space for a folio
170 * @folio: folio we want to move to swap
172 * Allocate swap space for the folio and add the folio to the
175 * Context: Caller needs to hold the folio lock.
176 * Return: Whether the folio was added to the swap cache.
178 bool add_to_swap(struct folio *folio)
183 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
184 VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
186 entry = folio_alloc_swap(folio);
201 err = add_to_swap_cache(folio, entry,
210 * Normally the folio will be dirtied in unmap because its
212 * page's pte could have the dirty bit cleared but the folio's
214 * and the SwapBacked flag is not protected by a lock. For such a folio,
215 * unmap will not set the dirty bit for it, so folio reclaim will
216 * not write the folio out. This can cause data corruption when
217 * the folio is swapped in later. Always setting the dirty flag
218 * for the folio solves the problem.
220 folio_mark_dirty(folio);
225 put_swap_folio(folio, entry);
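
The kernel-doc at 169-176 and the MADV_FREE comment at 210-218 together give the add_to_swap() contract: the caller holds the folio lock, the folio gets a swap entry plus a swap-cache slot, and it is always marked dirty so reclaim will actually write it out. A minimal caller sketch in the spirit of the reclaim path follows; the helper name is made up, and add_to_swap() is an mm-internal function (declared in mm/swap.h), so this only illustrates the calling convention.

        /* Hypothetical helper: prepare a locked anonymous folio for swap-out. */
        static bool try_prepare_swapout(struct folio *folio)
        {
                VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

                if (folio_test_swapcache(folio))
                        return true;            /* already has a swap slot */

                /* Allocates a swap entry and inserts the folio into the swap cache. */
                if (!add_to_swap(folio))
                        return false;           /* out of swap space, or the cache insert failed */

                /*
                 * add_to_swap() marked the folio dirty (see the MADV_FREE note
                 * above), so pageout will write it to the newly allocated slot.
                 */
                return true;
        }
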
232 * It will never put the folio into the free list;
233 * the caller has a reference on the folio.
235 void delete_from_swap_cache(struct folio *folio)
237 swp_entry_t entry = folio->swap;
241 __delete_from_swap_cache(folio, entry, NULL);
244 put_swap_folio(folio, entry);
245 folio_ref_sub(folio, folio_nr_pages(folio));
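
As the comment at 232-233 says, delete_from_swap_cache() never frees the folio itself; the caller is expected to hold its own reference while the swap-cache reference and the swap entry are dropped. A hedged sketch of that pattern follows; the helper name is invented and the writeback/locking checks are simplifications.

        /* Illustrative: drop the swap-cache copy backing @entry, if it is idle. */
        static void drop_swap_cache_entry(swp_entry_t entry)
        {
                struct folio *folio;

                folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
                if (IS_ERR(folio))
                        return;                 /* not in the swap cache */

                folio_lock(folio);
                if (folio_test_swapcache(folio) && !folio_test_writeback(folio))
                        delete_from_swap_cache(folio);  /* frees the entry, drops the cache ref */
                folio_unlock(folio);
                folio_put(folio);               /* the reference filemap_get_folio() gave us */
        }
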
281 * It's OK to check the swapcache flag without the folio lock
286 void free_swap_cache(struct folio *folio)
288 if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
289 folio_trylock(folio)) {
290 folio_free_swap(folio);
291 folio_unlock(folio);
301 struct folio *folio = page_folio(page);
303 free_swap_cache(folio);
304 if (!is_huge_zero_folio(folio))
305 folio_put(folio);
320 struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
322 free_swap_cache(folio);
328 if (folio_batch_add(&folios, folio) == 0)
341 * Lookup a swap entry in the swap cache. A found folio will be returned
343 * lock getting page table operations atomic even if we drop the folio
348 struct folio *swap_cache_get_folio(swp_entry_t entry,
351 struct folio *folio;
353 folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
354 if (!IS_ERR(folio)) {
362 if (unlikely(folio_test_large(folio)))
363 return folio;
365 readahead = folio_test_clear_readahead(folio);
385 folio = NULL;
388 return folio;
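
swap_cache_get_folio() (line 348) is the swap-in fast path: on a hit it returns the folio with an extra reference and updates the readahead state tested at line 365; on a miss it returns NULL (line 385) and the caller falls back to readahead or read_swap_cache_async(). A compressed sketch of that hit/miss split, with the fault-handler details omitted and the helper name made up:

        /* Illustrative lookup order for a swap fault on @entry. */
        static struct folio *lookup_swap_entry(swp_entry_t entry,
                                               struct vm_area_struct *vma,
                                               unsigned long addr)
        {
                struct folio *folio;

                /* Hit: the folio is returned with its refcount raised. */
                folio = swap_cache_get_folio(entry, vma, addr);
                if (folio)
                        return folio;

                /* Miss: the caller must allocate and read, e.g. via swapin_readahead(). */
                return NULL;
        }
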
392 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
397 * folio in the swap cache.
399 * Return: The found folio or %NULL.
401 struct folio *filemap_get_incore_folio(struct address_space *mapping,
406 struct folio *folio = filemap_get_entry(mapping, index);
408 if (!folio)
410 if (!xa_is_value(folio))
411 return folio;
415 swp = radix_to_swp_entry(folio);
424 folio = filemap_get_folio(swap_address_space(swp), index);
426 return folio;
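
Per the kernel-doc at 392-399, filemap_get_incore_folio() first probes the file's page cache; when the entry it finds is a swap value (a shmem page that has been swapped out), it re-resolves it through swap_address_space() as at line 424. The kernel-doc says it returns the found folio or %NULL; the sketch below guards with IS_ERR_OR_NULL() so it also covers an ERR_PTR-style miss. The helper name is made up.

        /* Illustrative: is page @index of @mapping resident in the page or swap cache? */
        static bool folio_is_incore(struct address_space *mapping, pgoff_t index)
        {
                struct folio *folio = filemap_get_incore_folio(mapping, index);

                if (IS_ERR_OR_NULL(folio))
                        return false;

                folio_put(folio);       /* drop the reference the lookup took */
                return true;
        }
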
429 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
434 struct folio *folio;
449 folio = filemap_get_folio(swap_address_space(entry),
451 if (!IS_ERR(folio))
466 * Get a new folio to read into from swap. Allocate it now,
470 folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
472 if (!folio)
482 folio_put(folio);
489 * is set but the folio is not in the swap cache yet. This can
502 * in swap_map, but not yet added its folio to the swap cache.
508 * The swap entry is ours to swap in. Prepare the new folio.
511 __folio_set_locked(folio);
512 __folio_set_swapbacked(folio);
514 if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
518 if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
524 workingset_refault(folio, shadow);
526 /* Caller will initiate read into locked folio */
527 folio_add_lru(folio);
531 return folio;
534 put_swap_folio(folio, entry);
535 folio_unlock(folio);
536 folio_put(folio);
550 * swap cache folio lock.
552 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
559 struct folio *folio;
562 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
567 swap_read_folio(folio, false, plug);
568 return folio;
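
read_swap_cache_async() wraps __read_swap_cache_async(), whose comments at 489-508 describe the race it resolves: concurrent faulters on the same entry, where only one allocates, charges, and inserts the folio (lines 511-527) and the others retry until they find it in the cache. The wrapper then starts the read at line 567 only when this caller was the one that allocated the folio. A hedged single-entry swap-in sketch, assuming the vma/addr/plug form of the signature begun at line 552 and the mm-internal swap_read_unplug() helper; the function name is invented.

        /* Illustrative: bring one swap entry into the swap cache synchronously. */
        static struct folio *swapin_one(swp_entry_t entry,
                                        struct vm_area_struct *vma,
                                        unsigned long addr)
        {
                struct swap_iocb *plug = NULL;
                struct folio *folio;

                folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                              vma, addr, &plug);
                swap_read_unplug(plug);         /* submit any read left on the plug */

                /* NULL means the allocation failed or the entry was freed meanwhile. */
                return folio;
        }
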
639 * Returns the struct folio for entry and addr, after queueing swapin.
650 struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
653 struct folio *folio;
678 folio = __read_swap_cache_async(
681 if (!folio)
684 swap_read_folio(folio, false, &splug);
686 folio_set_readahead(folio);
690 folio_put(folio);
697 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
700 zswap_folio_swapin(folio);
701 swap_read_folio(folio, false, NULL);
703 return folio;
807 * Returns the struct folio for entry and addr, after queueing swapin.
815 static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
820 struct folio *folio;
853 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
855 if (!folio)
858 swap_read_folio(folio, false, &splug);
860 folio_set_readahead(folio);
864 folio_put(folio);
872 /* The folio was likely read above, so no need for plugging here */
873 folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
876 zswap_folio_swapin(folio);
877 swap_read_folio(folio, false, NULL);
879 return folio;
899 struct folio *folio;
902 folio = swap_use_vma_readahead() ?
907 if (!folio)
909 return folio_file_page(folio, swp_offset(entry));
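
swapin_readahead() (lines 899-909) picks VMA-based or cluster readahead via swap_use_vma_readahead(); per the comments at 639 and 807, both return the target folio after queueing swap-in for its neighbours, and the precise page is handed back with folio_file_page(). A hedged fault-path sketch of how a caller consumes it; the helper name is invented and the locking comment describes the usual pattern rather than an exact kernel function.

        /* Illustrative: fault-time swap-in of @entry with readahead around it. */
        static struct page *swapin_fault(swp_entry_t entry, struct vm_fault *vmf)
        {
                struct page *page;

                page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
                if (!page)
                        return NULL;    /* allocation failed or the entry went away */

                /*
                 * The page sits in the swap cache, possibly with I/O still in
                 * flight; the caller locks the folio and checks
                 * folio_test_uptodate() before mapping it.
                 */
                return page;
        }
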