Lines matching refs: folio

83 struct folio *folio = fbatch->folios[i];
86 if (!xa_is_value(folio)) {
87 fbatch->folios[j++] = folio;
96 __clear_shadow_entry(mapping, index, folio);
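The matches above come from the batch pass that strips exceptional entries before the real truncation work: xa_is_value() tells a workingset shadow entry (stored as an XArray value) apart from a real folio pointer, real folios are compacted back into the batch, and shadow entries are cleared out of the mapping. A hedged sketch of that loop, reconstructed from the fragments rather than copied from source:

	for (i = j; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		pgoff_t index = indices[i];

		/* A real folio: keep it, compacting the batch in place. */
		if (!xa_is_value(folio)) {
			fbatch->folios[j++] = folio;
			continue;
		}

		/* A shadow entry left behind by reclaim: drop it. */
		__clear_shadow_entry(mapping, index, folio);
	}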
139 * folio_invalidate - Invalidate part or all of a folio.
140 * @folio: The folio which is affected.
144 * folio_invalidate() is called when all or part of the folio has become
153 void folio_invalidate(struct folio *folio, size_t offset, size_t length)
155 const struct address_space_operations *aops = folio->mapping->a_ops;
158 aops->invalidate_folio(folio, offset, length);
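folio_invalidate() simply dispatches to the mapping's ->invalidate_folio callback, so its behaviour is defined by the filesystem. A hedged sketch of how a filesystem might wire this up; myfs_invalidate_folio is hypothetical, while block_invalidate_folio() is the stock buffer-head implementation that many block-backed filesystems use:

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	/*
	 * Hypothetical handler: reached via folio_invalidate() whenever part
	 * or all of the folio is being removed from the page cache.
	 */
	static void myfs_invalidate_folio(struct folio *folio, size_t offset,
					  size_t length)
	{
		/* Drop buffer-head state attached to the invalidated range. */
		block_invalidate_folio(folio, offset, length);
	}

	static const struct address_space_operations myfs_aops = {
		.invalidate_folio	= myfs_invalidate_folio,
	};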
172 static void truncate_cleanup_folio(struct folio *folio)
174 if (folio_mapped(folio))
175 unmap_mapping_folio(folio);
177 if (folio_has_private(folio))
178 folio_invalidate(folio, 0, folio_size(folio));
185 folio_cancel_dirty(folio);
186 folio_clear_mappedtodisk(folio);
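Read in order, the fragments give the cleanup sequence: tear down page-table mappings first, let the filesystem invalidate its private data, and only then cancel dirty state. A reconstruction of the helper, assuming a recent (~v6.x) kernel:

	static void truncate_cleanup_folio(struct folio *folio)
	{
		if (folio_mapped(folio))
			unmap_mapping_folio(folio);

		/* Let the filesystem drop its private state first. */
		if (folio_has_private(folio))
			folio_invalidate(folio, 0, folio_size(folio));

		/*
		 * Cancelling dirty accounting after the invalidation means a
		 * filesystem that re-dirties during invalidation cannot leave
		 * stale dirty state behind on the truncated folio.
		 */
		folio_cancel_dirty(folio);
		folio_clear_mappedtodisk(folio);
	}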
189 int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
191 if (folio->mapping != mapping)
194 truncate_cleanup_folio(folio);
195 filemap_remove_folio(folio);
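The fragments reconstruct to a short function: refuse folios whose mapping no longer matches (they were truncated or remapped while we waited for the lock), otherwise clean up and delete. Roughly:

	int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
	{
		/* Truncated or remapped while we waited for its lock. */
		if (folio->mapping != mapping)
			return -EIO;

		truncate_cleanup_folio(folio);
		filemap_remove_folio(folio);
		return 0;
	}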
200 * Handle partial folios. The folio may be entirely within the
202 * folio that's within the [start, end] range, and then split the folio if
208 * discarding the entire folio which is stubbornly unsplit.
210 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
212 loff_t pos = folio_pos(folio);
219 length = folio_size(folio);
225 folio_wait_writeback(folio);
226 if (length == folio_size(folio)) {
227 truncate_inode_folio(folio->mapping, folio);
236 folio_zero_range(folio, offset, length);
238 if (folio_has_private(folio))
239 folio_invalidate(folio, offset, length);
240 if (!folio_test_large(folio))
242 if (split_folio(folio) == 0)
244 if (folio_test_dirty(folio))
246 truncate_inode_folio(folio->mapping, folio);
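Taken together, lines 210-246 give the partial-truncation strategy: work out how much of the folio falls inside [start, end], take the whole folio if it is fully covered, otherwise zero the affected bytes and try to split a large folio so the covered part can be freed. A hedged reconstruction; the offset/length computation is inferred around the fragments shown:

	bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
	{
		loff_t pos = folio_pos(folio);
		unsigned int offset, length;

		/* How far into the folio does the truncated range begin? */
		offset = pos < start ? start - pos : 0;
		length = folio_size(folio);
		if (pos + length <= (u64)end)
			length = length - offset;
		else
			length = end + 1 - pos - offset;

		folio_wait_writeback(folio);
		if (length == folio_size(folio)) {
			/* Entirely inside the range: discard the whole folio. */
			truncate_inode_folio(folio->mapping, folio);
			return true;
		}

		/* Zero the truncated bytes even if a split frees them anyway. */
		folio_zero_range(folio, offset, length);

		if (folio_has_private(folio))
			folio_invalidate(folio, offset, length);
		if (!folio_test_large(folio))
			return true;
		if (split_folio(folio) == 0)
			return true;
		if (folio_test_dirty(folio))
			return false;
		/* Clean but stubbornly unsplit: give up and drop it all. */
		truncate_inode_folio(folio->mapping, folio);
		return true;
	}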
254 struct folio *folio)
264 return truncate_inode_folio(mapping, folio);
269 * mapping_evict_folio() - Remove an unused folio from the page-cache.
270 * @mapping: The mapping this folio belongs to.
271 * @folio: The folio to remove.
273 * Safely remove one folio from the page cache.
279 long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
284 if (folio_test_dirty(folio) || folio_test_writeback(folio))
286 /* The refcount will be elevated if any page in the folio is mapped */
287 if (folio_ref_count(folio) >
288 folio_nr_pages(folio) + folio_has_private(folio) + 1)
290 if (!filemap_release_folio(folio, 0))
293 return remove_mapping(mapping, folio);
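The refcount comparison on lines 287-288 encodes the expected baseline: the page cache holds folio_nr_pages() references, private (e.g. buffer-head) data holds one more, and the caller holds one, so anything above nr_pages + has_private + 1 means someone else, typically a page table, still references the folio. Reconstructed:

	long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
	{
		/* The folio may have been truncated before it was locked. */
		if (!mapping)
			return 0;
		if (folio_test_dirty(folio) || folio_test_writeback(folio))
			return 0;
		/* The refcount will be elevated if any page in the folio is mapped. */
		if (folio_ref_count(folio) >
				folio_nr_pages(folio) + folio_has_private(folio) + 1)
			return 0;
		if (!filemap_release_folio(folio, 0))
			return 0;

		return remove_mapping(mapping, folio);
	}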
329 struct folio *folio;
367 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
368 if (!IS_ERR(folio)) {
369 same_folio = lend < folio_pos(folio) + folio_size(folio);
370 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
371 start = folio_next_index(folio);
373 end = folio->index;
375 folio_unlock(folio);
376 folio_put(folio);
377 folio = NULL;
381 folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
383 if (!IS_ERR(folio)) {
384 if (!truncate_inode_partial_folio(folio, lstart, lend))
385 end = folio->index;
386 folio_unlock(folio);
387 folio_put(folio);
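Lines 367-387 are the head and tail of truncate_inode_pages_range()'s partial-folio handling: the first lookup locks the folio covering lstart, same_folio records whether that folio also covers lend, and the second lookup runs only when it does not. A hedged illustration of the index arithmetic, assuming 4 KiB pages; the concrete offsets are invented for the example:

	/*
	 * Illustration only: truncate_inode_pages_range(mapping, lstart, lend)
	 * with lstart = 0x1800 (6 KiB) and lend = 0x4fff (20 KiB - 1).
	 *
	 *   lstart >> PAGE_SHIFT == 1   folio covering the first affected byte
	 *   lend   >> PAGE_SHIFT == 4   folio covering the last affected byte
	 *
	 * If the folio found at index 1 is large (say 32 KiB, so it spans
	 * 0x0000-0x7fff), then lend < folio_pos + folio_size, same_folio is
	 * true, and the second lookup is skipped: one call to
	 * truncate_inode_partial_folio() handles both partial ends.  With a
	 * plain 4 KiB folio at index 1, same_folio is false and the tail
	 * folio at index 4 is partially truncated separately.
	 */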
405 struct folio *folio = fbatch.folios[i];
409 if (xa_is_value(folio))
412 folio_lock(folio);
413 VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
414 folio_wait_writeback(folio);
415 truncate_inode_folio(mapping, folio);
416 folio_unlock(folio);
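The per-folio body of the main truncation loop is almost fully visible in the matches; reconstructed with the skipped connective lines filled in:

	struct folio *folio = fbatch.folios[i];

	/* Shadow entries were already stripped from the batch. */
	if (xa_is_value(folio))
		continue;

	folio_lock(folio);
	VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
	folio_wait_writeback(folio);
	truncate_inode_folio(mapping, folio);
	folio_unlock(folio);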
483 * @nr_failed: How many folio invalidations failed
501 struct folio *folio = fbatch.folios[i];
503 /* We rely upon deletion not changing folio->index */
505 if (xa_is_value(folio)) {
507 indices[i], folio);
511 ret = mapping_evict_folio(mapping, folio);
512 folio_unlock(folio);
514 * Invalidation is a hint that the folio is no longer
518 deactivate_file_folio(folio);
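When eviction fails the folio is not simply left alone: the comment at line 514 is completed by the deactivation call, which nudges the folio toward the inactive LRU so reclaim picks it up soon anyway. A hedged sketch of the tail of that loop:

	ret = mapping_evict_folio(mapping, folio);
	folio_unlock(folio);
	/*
	 * Invalidation is a hint that the folio is no longer of
	 * interest; even when eviction fails (ret == 0), speed up
	 * its reclaim by deactivating it.
	 */
	if (!ret) {
		deactivate_file_folio(folio);
		if (nr_failed)
			(*nr_failed)++;
	}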
554 * This is like mapping_evict_folio(), except it ignores the folio's
561 struct folio *folio)
563 if (folio->mapping != mapping)
566 if (!filemap_release_folio(folio, GFP_KERNEL))
571 if (folio_test_dirty(folio))
574 BUG_ON(folio_has_private(folio));
575 __filemap_remove_folio(folio, NULL);
581 filemap_free_folio(mapping, folio);
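Unlike mapping_evict_folio(), this variant ignores an elevated refcount, because its caller guarantees the folio is already unmapped, and it re-checks the dirty bit right before deletion. A hedged reconstruction of the shape only; the inode and xarray locking around the dirty check and removal is elided here:

	static int invalidate_complete_folio2(struct address_space *mapping,
						struct folio *folio)
	{
		if (folio->mapping != mapping)
			return 0;

		if (!filemap_release_folio(folio, GFP_KERNEL))
			return 0;

		/* Inode and xarray locking elided for brevity. */
		if (folio_test_dirty(folio))
			return 0;

		BUG_ON(folio_has_private(folio));
		__filemap_remove_folio(folio, NULL);
		filemap_free_folio(mapping, folio);
		return 1;
	}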
589 static int folio_launder(struct address_space *mapping, struct folio *folio)
591 if (!folio_test_dirty(folio))
593 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
595 return mapping->a_ops->launder_folio(folio);
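The laundering helper is short enough to reconstruct in full from the matches; it writes back a dirty folio through the filesystem's ->launder_folio hook so that invalidation does not throw away data:

	static int folio_launder(struct address_space *mapping, struct folio *folio)
	{
		if (!folio_test_dirty(folio))
			return 0;
		if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
			return 0;
		return mapping->a_ops->launder_folio(folio);
	}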
627 struct folio *folio = fbatch.folios[i];
629 /* We rely upon deletion not changing folio->index */
631 if (xa_is_value(folio)) {
633 indices[i], folio))
638 if (!did_range_unmap && folio_mapped(folio)) {
640 * If folio is mapped, before taking its lock,
648 folio_lock(folio);
649 if (unlikely(folio->mapping != mapping)) {
650 folio_unlock(folio);
653 VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
654 folio_wait_writeback(folio);
656 if (folio_mapped(folio))
657 unmap_mapping_folio(folio);
658 BUG_ON(folio_mapped(folio));
660 ret2 = folio_launder(mapping, folio);
662 if (!invalidate_complete_folio2(mapping, folio))
667 folio_unlock(folio);
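Lines 638-667 give invalidate_inode_pages2_range()'s per-folio ordering: unmap the whole range once, lazily, on the first mapped folio; then, per folio, wait for writeback, unmap any stragglers, launder remaining dirty data, and finally attempt the refcount-ignoring eviction. A hedged sketch of the tail of that body, with ret/ret2 taken from the surrounding context:

	folio_wait_writeback(folio);

	/* A racing fault may have re-mapped the folio; undo it. */
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);
	BUG_ON(folio_mapped(folio));

	ret2 = folio_launder(mapping, folio);
	if (ret2 == 0 && !invalidate_complete_folio2(mapping, folio))
		ret2 = -EBUSY;
	if (ret2 < 0)
		ret = ret2;
	folio_unlock(folio);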
775 * coming after we unlock the folio will already see the new i_size.
784 struct folio *folio;
795 folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
797 if (IS_ERR(folio))
803 if (folio_mkclean(folio))
804 folio_mark_dirty(folio);
805 folio_unlock(folio);
806 folio_put(folio);
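The final matches are from pagecache_isize_extended(): after i_size grows, the folio straddling the old size is locked and write-protected so that the next write through a mapping faults and lets the filesystem deal with the newly exposed tail. Reconstructed with comments, under the same recent-kernel assumption:

	folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
	/* The folio straddling the old i_size may already be gone. */
	if (IS_ERR(folio))
		return;
	/*
	 * folio_mkclean() write-protects every mapping of the folio,
	 * forcing the next mmap write to fault.  If it found writable
	 * mappings, mark the folio dirty so any data already written
	 * through them is not lost before that fault happens.
	 */
	if (folio_mkclean(folio))
		folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);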