Lines Matching defs:folio (from the Linux kernel's mm/migrate.c; the number at the start of each line below is the line number within that file)

62 struct folio *folio = folio_get_nontail_page(page);
74 if (!folio)
77 if (unlikely(folio_test_slab(folio)))
86 if (unlikely(!__folio_test_movable(folio)))
90 if (unlikely(folio_test_slab(folio)))
104 if (unlikely(!folio_trylock(folio)))
107 if (!folio_test_movable(folio) || folio_test_isolated(folio))
110 mops = folio_movable_ops(folio);
111 VM_BUG_ON_FOLIO(!mops, folio);
113 if (!mops->isolate_page(&folio->page, mode))
117 WARN_ON_ONCE(folio_test_isolated(folio));
118 folio_set_isolated(folio);
119 folio_unlock(folio);
124 folio_unlock(folio);
126 folio_put(folio);
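
The isolate_movable_page() lines above (62-126) only work because a driver has previously attached a struct movable_operations to its non-LRU movable pages: line 110 fetches those ops and line 113 invokes ->isolate_page(). As a minimal sketch of the driver side, assuming the movable_operations API used by this kernel version (all demo_* names are invented for illustration):

#include <linux/migrate.h>

/*
 * Hypothetical callbacks for a driver that owns non-LRU movable pages.
 */
static bool demo_isolate(struct page *page, isolate_mode_t mode)
{
        /* Pin driver metadata so the page cannot go away while isolated. */
        return true;
}

static int demo_migrate(struct page *dst, struct page *src,
                        enum migrate_mode mode)
{
        /* Copy the contents and repoint driver bookkeeping at dst. */
        return MIGRATEPAGE_SUCCESS;
}

static void demo_putback(struct page *page)
{
        /* Undo whatever demo_isolate() did (invoked from line 135 above). */
}

static const struct movable_operations demo_mops = {
        .isolate_page  = demo_isolate,
        .migrate_page  = demo_migrate,
        .putback_page  = demo_putback,
};

/*
 * At allocation time the driver marks the page movable so that
 * folio_movable_ops() at line 110 can find these callbacks.
 */
static void demo_mark_movable(struct page *page)
{
        __SetPageMovable(page, &demo_mops);
}
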
131 static void putback_movable_folio(struct folio *folio)
133 const struct movable_operations *mops = folio_movable_ops(folio);
135 mops->putback_page(&folio->page);
136 folio_clear_isolated(folio);
149 struct folio *folio;
150 struct folio *folio2;
152 list_for_each_entry_safe(folio, folio2, l, lru) {
153 if (unlikely(folio_test_hugetlb(folio))) {
154 folio_putback_active_hugetlb(folio);
157 list_del(&folio->lru);
159 * We isolated non-lru movable folio so here we can use
160 * __folio_test_movable because LRU folio's mapping cannot
163 if (unlikely(__folio_test_movable(folio))) {
164 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 folio_lock(folio);
166 if (folio_test_movable(folio))
167 putback_movable_folio(folio);
169 folio_clear_isolated(folio);
170 folio_unlock(folio);
171 folio_put(folio);
173 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 folio_is_file_lru(folio), -folio_nr_pages(folio));
175 folio_putback_lru(folio);
183 static bool remove_migration_pte(struct folio *folio,
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
199 new = folio_page(folio, idx);
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
211 folio_get(folio);
218 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
230 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
248 if (folio_test_hugetlb(folio)) {
254 if (folio_test_anon(folio))
255 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
258 hugetlb_add_file_rmap(folio);
264 if (folio_test_anon(folio))
265 folio_add_anon_rmap_pte(folio, new, vma,
268 folio_add_file_rmap_pte(folio, new, vma);
288 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
382 struct folio *folio)
388 refs += folio_nr_pages(folio);
389 if (folio_test_private(folio))
404 struct folio *newfolio, struct folio *folio, int extra_count)
406 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
409 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
410 long nr = folio_nr_pages(folio);
415 if (folio_ref_count(folio) != expected_count)
419 newfolio->index = folio->index;
420 newfolio->mapping = folio->mapping;
421 if (folio_test_swapbacked(folio))
427 oldzone = folio_zone(folio);
431 if (!folio_ref_freeze(folio, expected_count)) {
437 * Now we know that no one else is looking at the folio:
440 newfolio->index = folio->index;
441 newfolio->mapping = folio->mapping;
443 if (folio_test_swapbacked(folio)) {
445 if (folio_test_swapcache(folio)) {
447 newfolio->private = folio_get_private(folio);
451 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
456 dirty = folio_test_dirty(folio);
458 folio_clear_dirty(folio);
473 folio_ref_unfreeze(folio, expected_count - nr);
492 memcg = folio_memcg(folio);
498 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
502 if (folio_test_pmd_mappable(folio)) {
508 if (folio_test_swapcache(folio)) {
531 struct folio *dst, struct folio *src)
560 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
564 if (folio_test_error(folio))
566 if (folio_test_referenced(folio))
568 if (folio_test_uptodate(folio))
570 if (folio_test_clear_active(folio)) {
571 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
573 } else if (folio_test_clear_unevictable(folio))
575 if (folio_test_workingset(folio))
577 if (folio_test_checked(folio))
585 if (folio_test_mappedtodisk(folio))
589 if (folio_test_dirty(folio))
592 if (folio_test_young(folio))
594 if (folio_test_idle(folio))
601 cpupid = folio_xchg_last_cpupid(folio, -1);
608 bool f_toptier = node_is_toptier(folio_nid(folio));
616 folio_migrate_ksm(newfolio, folio);
621 if (folio_test_swapcache(folio))
622 folio_clear_swapcache(folio);
623 folio_clear_private(folio);
626 if (!folio_test_hugetlb(folio))
627 folio->private = NULL;
641 if (folio_test_readahead(folio))
644 folio_copy_owner(newfolio, folio);
646 mem_cgroup_migrate(folio, newfolio);
650 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
652 folio_copy(newfolio, folio);
653 folio_migrate_flags(newfolio, folio);
661 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
662 struct folio *src, enum migrate_mode mode, int extra_count)
681 * migrate_folio() - Simple folio migration.
682 * @mapping: The address_space containing the folio.
683 * @dst: The folio to migrate the data to.
684 * @src: The folio containing the current data.
687 * Common logic to directly migrate a single LRU folio suitable for
692 int migrate_folio(struct address_space *mapping, struct folio *dst,
693 struct folio *src, enum migrate_mode mode)
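
The kernel-doc fragments at lines 681-693 describe migrate_folio(), the stock ->migrate_folio implementation for filesystems whose folios carry no private data. A minimal sketch of how a filesystem might wire it up (demo_aops is hypothetical, and a real address_space_operations would define its other callbacks too):

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations demo_aops = {
        /*
         * migrate_folio() (lines 692-693 above) moves the folio in the
         * page cache and copies data and flags; no fs involvement needed.
         *
         * A block-device-backed filesystem whose folios carry buffer
         * heads would use buffer_migrate_folio() (around lines 813-826)
         * here instead.
         */
        .migrate_folio = migrate_folio,
};
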
734 struct folio *dst, struct folio *src, enum migrate_mode mode,
813 * @dst: The folio to migrate to.
814 * @src: The folio to migrate from.
815 * @mode: How to migrate the folio.
819 * heads are accessed only under the folio lock. If your filesystem cannot
826 struct folio *dst, struct folio *src, enum migrate_mode mode)
835 * @dst: The folio to migrate to.
836 * @src: The folio to migrate from.
837 * @mode: How to migrate the folio.
847 struct folio *dst, struct folio *src, enum migrate_mode mode)
855 struct folio *dst, struct folio *src, enum migrate_mode mode)
875 * Writeback a folio to clean the dirty state
877 static int writeout(struct address_space *mapping, struct folio *folio)
892 if (!folio_clear_dirty_for_io(folio))
897 * A dirty folio may imply that the underlying filesystem has
898 * the folio on some queue. So the folio must be clean for
900 * folio state is no longer what we checked for earlier.
904 remove_migration_ptes(folio, folio, false);
906 rc = mapping->a_ops->writepage(&folio->page, &wbc);
910 folio_lock(folio);
919 struct folio *dst, struct folio *src, enum migrate_mode mode)
954 static int move_to_new_folio(struct folio *dst, struct folio *src,
1034 * field of struct folio of the newly allocated destination folio.
1043 static void __migrate_folio_record(struct folio *dst,
1050 static void __migrate_folio_extract(struct folio *dst,
1061 /* Restore the source folio to the original state upon failure */
1062 static void migrate_folio_undo_src(struct folio *src,
1079 /* Restore the destination folio to the original state upon failure */
1080 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1091 /* Cleanup src folio upon migration success */
1092 static void migrate_folio_done(struct folio *src,
1112 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1115 struct folio *dst;
1258 * A folio that has not been unmapped will be restored to
1271 /* Migrate the folio to the newly allocated folio in dst. */
1273 struct folio *src, struct folio *dst,
1321 * A folio that has been migrated has all references removed
1334 * A folio that has not been migrated will be restored to
1370 struct folio *src, int force, enum migrate_mode mode,
1373 struct folio *dst;
1482 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1486 folio_lock(folio);
1487 rc = split_folio_to_list(folio, split_folios);
1488 folio_unlock(folio);
1490 list_move_tail(&folio->lru, split_folios);
1513 int nr_split; /* Large folio (include THP) split before migrating */
1533 struct folio *folio, *folio2;
1540 list_for_each_entry_safe(folio, folio2, from, lru) {
1541 if (!folio_test_hugetlb(folio))
1544 nr_pages = folio_nr_pages(folio);
1555 if (!hugepage_migration_supported(folio_hstate(folio))) {
1558 list_move_tail(&folio->lru, ret_folios);
1564 folio, pass > 2, mode,
1568 * Success: hugetlb folio will be put back
1591 * unlike -EAGAIN case, the failed folio is
1592 * removed from migration folio list and not
1617 * lock or bit when we have locked more than one folio. Which may cause
1634 struct folio *folio, *folio2, *dst = NULL, *dst2;
1648 list_for_each_entry_safe(folio, folio2, from, lru) {
1649 is_large = folio_test_large(folio);
1650 is_thp = is_large && folio_test_pmd_mappable(folio);
1651 nr_pages = folio_nr_pages(folio);
1656 * Large folio migration might be unsupported or
1658 * on the same folio with the large folio split
1668 if (!try_split_folio(folio, split_folios)) {
1674 list_move_tail(&folio->lru, ret_folios);
1679 private, folio, &dst, mode, reason,
1683 * Success: folio will be freed
1684 * Unmap: folio will be put on unmap_folios list,
1685 * dst folio put on dst_folios list
1698 /* Large folio NUMA faulting doesn't split to retry. */
1700 int ret = try_split_folio(folio, split_folios);
1709 * Try again to split large folio to
1740 list_move_tail(&folio->lru, &unmap_folios);
1746 * unlike -EAGAIN case, the failed folio is
1747 * removed from migration folio list and not
1770 dst = list_first_entry(&dst_folios, struct folio, lru);
1772 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1773 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1774 nr_pages = folio_nr_pages(folio);
1779 folio, dst, mode,
1783 * Success: folio will be freed
1814 dst = list_first_entry(&dst_folios, struct folio, lru);
1816 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1821 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1889 * as the target of the folio migration.
1894 * folio migration, if any.
1895 * @reason: The reason for folio migration.
1904 * Returns the number of {normal folio, large folio, hugetlb} that were not
1905 * migrated, or an error code. The number of large folio splits will be
1906 * considered as the number of non-migrated large folio, no matter how many
1907 * split folios of the large folio are migrated successfully.
1915 struct folio *folio, *folio2;
1932 list_for_each_entry_safe(folio, folio2, from, lru) {
1934 if (folio_test_hugetlb(folio)) {
1935 list_move_tail(&folio->lru, &ret_folios);
1939 nr_pages += folio_nr_pages(folio);
1964 * Failure isn't counted since all split folios of a large folio
1978 * Put the permanent failure folio back to migration list, they
2006 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
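
alloc_migration_target() at line 2006 is the stock allocation callback handed to migrate_pages(); callers pass it a struct migration_target_control (an mm-internal type) through the private argument. A rough sketch of such a caller, assuming the migrate_pages() signature used by this kernel version (demo_migrate_list and the GFP choice are illustrative only):

#include <linux/migrate.h>
#include <linux/gfp.h>
#include "internal.h"   /* struct migration_target_control lives in mm/internal.h */

static int demo_migrate_list(struct list_head *pagelist, int target_nid)
{
        struct migration_target_control mtc = {
                .nid      = target_nid,
                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
        };

        /* Folios on @pagelist must already be isolated (see lines 2122-2133). */
        return migrate_pages(pagelist, alloc_migration_target, NULL,
                             (unsigned long)&mtc, MIGRATE_SYNC,
                             MR_SYSCALL, NULL);
}
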
2087 struct folio *folio;
2109 folio = page_folio(page);
2110 if (folio_is_zone_device(folio))
2114 if (folio_nid(folio) == node)
2122 if (folio_test_hugetlb(folio)) {
2123 if (isolate_hugetlb(folio, pagelist))
2126 if (!folio_isolate_lru(folio))
2130 list_add_tail(&folio->lru, pagelist);
2131 node_stat_mod_folio(folio,
2132 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2133 folio_nr_pages(folio));
2138 * or drop the folio ref if it was not isolated.
2140 folio_put(folio);
2494 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2511 static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2513 int nr_pages = folio_nr_pages(folio);
2534 folio_order(folio), ZONE_MOVABLE);
2538 if (!folio_isolate_lru(folio))
2541 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2545 * Isolating the folio has taken another reference, so the
2546 * caller's reference can be safely dropped without the folio
2549 folio_put(folio);
2554 * Attempt to migrate a misplaced folio to the specified destination
2556 * the folio that will be dropped by this function before returning.
2558 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2566 int nr_pages = folio_nr_pages(folio);
2571 * To check if the folio is shared, ideally we want to make sure
2573 * expensive, so check the estimated mapcount of the folio instead.
2575 if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
2583 if (folio_is_file_lru(folio) && folio_test_dirty(folio))
2586 isolated = numamigrate_isolate_folio(pgdat, folio);
2590 list_add(&folio->lru, &migratepages);
2596 list_del(&folio->lru);
2597 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2598 folio_is_file_lru(folio), -nr_pages);
2599 folio_putback_lru(folio);
2605 if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2613 folio_put(folio);
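
migrate_misplaced_folio() (lines 2558-2613) is what the NUMA hinting fault path uses to move a folio to the node that is actually touching it; note the comment at line 2556 that the caller's folio reference is consumed. A minimal sketch of a caller, assuming the three-argument signature and nonzero-on-success return convention shown here (both have changed in later kernels; demo_numa_fix_placement is invented):

#include <linux/mm.h>
#include <linux/migrate.h>

static bool demo_numa_fix_placement(struct folio *folio,
                                    struct vm_area_struct *vma,
                                    int target_nid)
{
        /*
         * migrate_misplaced_folio() drops the folio reference itself,
         * whether or not the migration succeeds, so no folio_put() here.
         */
        return migrate_misplaced_folio(folio, vma, target_nid) != 0;
}

On success the data now resides on target_nid, and a real fault handler would typically record that for its NUMA-fault accounting.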