Lines matching defs:folio in mm/rmap.c

494  * NOTE: the caller should normally hold folio lock when calling this.  If
497 * concurrently without folio lock protection). See folio_lock_anon_vma_read()
500 struct anon_vma *folio_get_anon_vma(struct folio *folio)
506 anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
509 if (!folio_mapped(folio))
519 * If this folio is still mapped, then its anon_vma cannot have been
525 if (!folio_mapped(folio)) {
544 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
553 anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
556 if (!folio_mapped(folio))
564 * might not hold the folio lock here.
566 if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
574 * If the folio is still mapped, then this anon_vma is still
578 if (!folio_mapped(folio)) {
597 if (!folio_mapped(folio)) {
609 * not hold the folio lock here.
611 if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
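The two lookups above differ in what the caller gets back: folio_get_anon_vma() takes a reference on the anon_vma, which the caller later drops with put_anon_vma(), while folio_lock_anon_vma_read() returns the anon_vma with its rwsem held for read, released with anon_vma_unlock_read(). Below is a minimal sketch of the reference-counted variant, loosely modeled on how migration pins the anon_vma; the helper name and surrounding flow are illustrative, not taken from the listing.

#include <linux/rmap.h>
#include <linux/mm.h>

/*
 * Sketch only: grab a stable anon_vma reference for an anon folio so it
 * cannot be freed while we operate on the folio, then drop the reference.
 */
static void example_pin_anon_vma(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;

	if (folio_test_anon(folio) && !folio_test_ksm(folio))
		anon_vma = folio_get_anon_vma(folio);	/* may return NULL */

	/* ... walk or unmap the folio here ... */

	if (anon_vma)
		put_anon_vma(anon_vma);
}
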
777 struct folio *folio = page_folio(page);
778 if (folio_test_anon(folio)) {
779 struct anon_vma *page__anon_vma = folio_anon_vma(folio);
789 } else if (vma->vm_file->f_mapping != folio->mapping) {
835 static bool folio_referenced_one(struct folio *folio,
839 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
847 if (!folio_test_large(folio) || !pvmw.pte) {
849 mlock_vma_folio(folio, vma);
855 * For large folio fully mapped to VMA, will
858 * For large folio cross VMA boundaries, it's
885 /* unexpected pmd-mapped folio? */
893 folio_test_large(folio) &&
894 folio_within_vma(folio, vma)) {
898 e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE);
900 /* folio doesn't cross page table boundary and fully mapped */
901 if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) {
903 mlock_vma_folio(folio, vma);
910 folio_clear_idle(folio);
911 if (folio_test_clear_young(folio))
932 * folio has been used in another mapping, we will catch it; if this
934 * referenced flag or activated the folio in zap_pte_range().
950 * folio_referenced() - Test if the folio was referenced.
951 * @folio: The folio to test.
952 * @is_locked: Caller holds lock on the folio.
954 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
956 * Quick test_and_clear_referenced for all mappings of a folio,
958 * Return: The number of mappings which referenced the folio. Return -1 if
961 int folio_referenced(struct folio *folio, int is_locked,
966 .mapcount = folio_mapcount(folio),
981 if (!folio_raw_mapping(folio))
984 if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
985 we_locked = folio_trylock(folio);
990 rmap_walk(folio, &rwc);
994 folio_unlock(folio);
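folio_referenced() walks every mapping of the folio via rmap_walk() and clears the accessed bits it finds, returning how many mappings had referenced the folio, or -1 on lock contention as the comment above notes. A hedged sketch of how a reclaim-style caller could combine it with the folio's software referenced flag follows; the decision logic is deliberately simplified and is not the kernel's actual aging policy.

#include <linux/rmap.h>
#include <linux/mm.h>

/* Sketch: decide whether a locked folio looks "recently used". */
static bool example_folio_recently_used(struct folio *folio,
					struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced_ptes;

	/* Caller holds the folio lock, so is_locked == 1. */
	referenced_ptes = folio_referenced(folio, 1, memcg, &vm_flags);

	/* Software referenced flag, set by folio_mark_accessed() and friends. */
	if (folio_test_clear_referenced(folio))
		return true;

	return referenced_ptes > 0;
}
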
1008 * the folio can not be freed from this function.
1047 /* unexpected pmd-mapped folio? */
1061 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1064 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1080 int folio_mkclean(struct folio *folio)
1090 BUG_ON(!folio_test_locked(folio));
1092 if (!folio_mapped(folio))
1095 mapping = folio_mapping(folio);
1099 rmap_walk(folio, &rwc);
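folio_mkclean() write-protects and cleans every PTE mapping the file folio (via page_mkclean_one() above) and returns the number of PTEs it cleaned. A minimal sketch of the classic writeback idiom: if any PTE was still writable or dirty, transfer that dirtiness back to the folio before it is written out. This mirrors the spirit of folio_clear_dirty_for_io() but is heavily simplified.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: ensure no CPU can silently redirty the folio during writeback. */
static void example_prepare_for_writeback(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	/*
	 * Write-protect all mappings; if any PTE was writable/dirty,
	 * mark the folio dirty again so the data actually gets written.
	 */
	if (folio_mkclean(folio))
		folio_mark_dirty(folio);
}
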
1137 int folio_total_mapcount(struct folio *folio)
1139 int mapcount = folio_entire_mapcount(folio);
1144 if (folio_nr_pages_mapped(folio) == 0)
1151 nr_pages = folio_nr_pages(folio);
1153 mapcount += atomic_read(&folio_page(folio, i)->_mapcount);
1160 static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
1164 atomic_t *mapped = &folio->_nr_pages_mapped;
1167 __folio_rmap_sanity_checks(folio, page, nr_pages, level);
1173 if (first && folio_test_large(folio)) {
1183 first = atomic_inc_and_test(&folio->_entire_mapcount);
1187 *nr_pmdmapped = folio_nr_pages(folio);
1203 * folio_move_anon_rmap - move a folio to our anon_vma
1204 * @folio: The folio to move to our anon_vma
1205 * @vma: The vma the folio belongs to
1207 * When a folio belongs exclusively to one process after a COW event,
1208 * that folio can be moved into the anon_vma that belongs to just that
1211 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
1215 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1224 WRITE_ONCE(folio->mapping, anon_vma);
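folio_move_anon_rmap() simply rewrites folio->mapping to point at this VMA's anon_vma (tagged with PAGE_MAPPING_ANON), which is only safe once the folio is known to be exclusive to this process and the folio lock is held. A hedged sketch of a COW-reuse style caller; the ownership checks a real caller performs first (no KSM, no extra references, not in the swap cache) are assumed to have been done already, and the helper name is illustrative.

#include <linux/rmap.h>
#include <linux/mm.h>

/*
 * Sketch: after a write fault, reuse an anon folio we exclusively own
 * instead of copying it. Assumes the folio lock is held and exclusivity
 * has already been established by the caller.
 */
static void example_reuse_exclusive_anon_folio(struct folio *folio,
					       struct page *page,
					       struct vm_area_struct *vma)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	/* Rebind the folio to this VMA's anon_vma; it is ours alone now. */
	folio_move_anon_rmap(folio, vma);

	/* Record that no other process can hold a mapping or swap reference. */
	if (!PageAnonExclusive(page))
		SetPageAnonExclusive(page);
}
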
1228 * __folio_set_anon - set up a new anonymous rmap for a folio
1229 * @folio: The folio to set up the new anonymous rmap for.
1230 * @vma: VM area to add the folio to.
1232 * @exclusive: Whether the folio is exclusive to the process.
1234 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
1242 * If the folio isn't exclusive to this vma, we must use the _oldest_
1243 * possible anon_vma for the folio mapping!
1249 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
1255 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
1256 folio->index = linear_page_index(vma, address);
1261 * @folio: The folio containing @page.
1266 static void __page_check_anon_rmap(struct folio *folio, struct page *page,
1280 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1281 folio);
1286 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
1292 nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
1294 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
1296 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
1298 if (unlikely(!folio_test_anon(folio))) {
1299 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
1301 * For a PTE-mapped large folio, we only know that the single
1303 * folio->index right when not given the address of the head
1306 VM_WARN_ON_FOLIO(folio_test_large(folio) &&
1307 level != RMAP_LEVEL_PMD, folio);
1308 __folio_set_anon(folio, vma, address,
1310 } else if (likely(!folio_test_ksm(folio))) {
1311 __page_check_anon_rmap(folio, page, vma, address);
1330 (folio_test_large(folio) &&
1331 folio_entire_mapcount(folio) > 1)) &&
1332 PageAnonExclusive(cur_page), folio);
1336 * For large folio, only mlock it if it's fully mapped to VMA. It's
1337 * not easy to check whether the large folio is fully mapped to VMA
1338 * here. Only mlock normal 4K folio and leave page reclaim to handle
1339 * large folio.
1341 if (!folio_test_large(folio))
1342 mlock_vma_folio(folio, vma);
1346 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
1347 * @folio: The folio to add the mappings to
1354 * The page range of folio is defined by [first_page, first_page + nr_pages)
1358 * and to ensure that an anon folio is not being upgraded racily to a KSM folio
1361 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
1365 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
1370 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
1371 * @folio: The folio to add the mapping to
1377 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
1382 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
1386 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
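folio_add_anon_rmap_ptes() and folio_add_anon_rmap_pmd() are the "the folio may already be mapped elsewhere" variants: they bump the relevant mapcounts and only call __folio_set_anon() when the folio is not anonymous yet. A hedged sketch of the single-PTE case as a swap-in style caller might use it; folio_add_anon_rmap_pte() is the nr_pages == 1 convenience wrapper from <linux/rmap.h>, and the PTE setup and locking around it are elided.

#include <linux/rmap.h>
#include <linux/mm.h>

/*
 * Sketch: re-establish one PTE mapping of an existing anon folio
 * (e.g. after swap-in). rmap_flags carries RMAP_EXCLUSIVE when this
 * process is known to be the only owner of the page.
 */
static void example_map_anon_page(struct folio *folio, struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long addr, rmap_t rmap_flags)
{
	folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
	/* ...followed by set_pte_at() to install the hardware mapping... */
}
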
1394 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1395 * @folio: The folio to add the mapping to.
1401 * The folio does not have to be locked.
1403 * If the folio is pmd-mappable, it is accounted as a THP. As the folio
1406 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
1409 int nr = folio_nr_pages(folio);
1411 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
1414 __folio_set_swapbacked(folio);
1415 __folio_set_anon(folio, vma, address, true);
1417 if (likely(!folio_test_large(folio))) {
1419 atomic_set(&folio->_mapcount, 0);
1420 SetPageAnonExclusive(&folio->page);
1421 } else if (!folio_test_pmd_mappable(folio)) {
1425 struct page *page = folio_page(folio, i);
1432 atomic_set(&folio->_nr_pages_mapped, nr);
1435 atomic_set(&folio->_entire_mapcount, 0);
1436 atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
1437 SetPageAnonExclusive(&folio->page);
1438 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
1441 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
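folio_add_new_anon_rmap() is the fast path for a freshly allocated folio that is not yet visible to anyone else: it can initialise the mapcounts and PageAnonExclusive without worrying about concurrent rmap walkers, and it accounts pmd-mappable folios as THPs. A condensed, hedged sketch of the anonymous-fault idiom around it follows; page table locking, error handling and the uffd/softdirty details of a real fault path are omitted.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

/* Sketch: wire up a brand-new zeroed anon folio at a faulting address. */
static void example_install_new_anon_folio(struct folio *folio,
					   struct vm_area_struct *vma,
					   unsigned long addr, pte_t *pte)
{
	pte_t entry = mk_pte(&folio->page, vma->vm_page_prot);

	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry), vma);

	/* New folio: no other mapping can exist yet, no folio lock needed. */
	folio_add_new_anon_rmap(folio, vma, addr);
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, addr, pte, entry);
}
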
1444 static __always_inline void __folio_add_file_rmap(struct folio *folio,
1450 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
1452 nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
1454 __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
1457 __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
1460 if (!folio_test_large(folio))
1461 mlock_vma_folio(folio, vma);
1465 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
1466 * @folio: The folio to add the mappings to
1471 * The page range of the folio is defined by [page, page + nr_pages)
1475 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
1478 __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
1482 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
1483 * @folio: The folio to add the mapping to
1487 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1491 void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
1495 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
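The file-backed counterparts have no anon_vma to set up; they only update the mapcounts and the NR_FILE_MAPPED / NR_SHMEM_*MAPPED style counters. A hedged sketch of mapping a batch of consecutive pages of a page-cache folio during a file fault, loosely modeled on the fault-around path; PTE construction and the surrounding locking are assumed done by the caller.

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Sketch: account nr consecutive pages of a page-cache folio as mapped
 * by PTEs in this VMA, then install the PTEs in one go.
 */
static void example_map_file_pages(struct folio *folio, struct page *page,
				   int nr, struct vm_area_struct *vma,
				   unsigned long addr, pte_t *pte, pte_t entry)
{
	folio_add_file_rmap_ptes(folio, page, nr, vma);
	set_ptes(vma->vm_mm, addr, pte, entry, nr);
}
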
1501 static __always_inline void __folio_remove_rmap(struct folio *folio,
1505 atomic_t *mapped = &folio->_nr_pages_mapped;
1509 __folio_rmap_sanity_checks(folio, page, nr_pages, level);
1515 if (last && folio_test_large(folio)) {
1525 last = atomic_add_negative(-1, &folio->_entire_mapcount);
1529 nr_pmdmapped = folio_nr_pages(folio);
1543 if (folio_test_anon(folio))
1545 else if (folio_test_swapbacked(folio))
1549 __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped);
1552 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
1553 __lruvec_stat_mod_folio(folio, idx, -nr);
1556 * Queue anon large folio for deferred split if at least one
1557 * page of the folio is unmapped and at least one page
1560 if (folio_test_large(folio) && folio_test_anon(folio))
1562 deferred_split_folio(folio);
1573 munlock_vma_folio(folio, vma);
1577 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
1578 * @folio: The folio to remove the mappings from
1583 * The page range of the folio is defined by [page, page + nr_pages)
1587 void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
1590 __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
1594 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
1595 * @folio: The folio to remove the mapping from
1599 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1603 void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
1607 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
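Removal is symmetric: the page table entry is cleared first, then folio_remove_rmap_pte()/folio_remove_rmap_ptes()/folio_remove_rmap_pmd() drop the mapcounts, adjust the NR_*_MAPPED counters and, for a partially unmapped large anon folio, queue deferred splitting. A hedged sketch of unmapping a single PTE; the TLB flushing and mmu_notifier calls a real caller needs are omitted.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: tear down a single PTE mapping of a folio at addr. */
static void example_unmap_one_pte(struct folio *folio, struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pteval = ptep_get_and_clear(mm, addr, ptep);

	/* Preserve hardware dirty state before the PTE is gone. */
	if (pte_dirty(pteval))
		folio_mark_dirty(folio);

	folio_remove_rmap_pte(folio, page, vma);
	folio_put(folio);	/* the reference that backed the mapping */
}
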
1616 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1620 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1639 split_huge_pmd_address(vma, address, false, folio);
1646 * Note that the folio can not be freed in this function as call of
1647 * try_to_unmap() must hold a reference on the folio.
1652 if (folio_test_hugetlb(folio)) {
1667 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1670 * If the folio is in an mlock()d vma, we must not swap it out.
1675 if (!folio_test_large(folio))
1676 mlock_vma_folio(folio, vma);
1683 subpage = folio_page(folio, pfn - folio_pfn(folio));
1685 anon_exclusive = folio_test_anon(folio) &&
1688 if (folio_test_hugetlb(folio)) {
1689 bool anon = folio_test_anon(folio);
1748 * a remote CPU could still be writing to the folio.
1769 /* Set the dirty flag on the folio now the pte is gone. */
1771 folio_mark_dirty(folio);
1778 if (folio_test_hugetlb(folio)) {
1779 hugetlb_count_sub(folio_nr_pages(folio), mm);
1783 dec_mm_counter(mm, mm_counter(folio));
1798 dec_mm_counter(mm, mm_counter(folio));
1799 } else if (folio_test_anon(folio)) {
1806 if (unlikely(folio_test_swapbacked(folio) !=
1807 folio_test_swapcache(folio))) {
1815 if (!folio_test_swapbacked(folio)) {
1825 ref_count = folio_ref_count(folio);
1826 map_count = folio_mapcount(folio);
1839 !folio_test_dirty(folio)) {
1845 * If the folio was redirtied, it cannot be
1849 folio_set_swapbacked(folio);
1871 folio_try_share_anon_rmap_pte(folio, subpage)) {
1896 * This is a locked file-backed folio,
1898 * cache and replaced by a new folio before
1901 * to point at a new folio while a device is
1902 * still using this folio.
1906 dec_mm_counter(mm, mm_counter_file(folio));
1909 if (unlikely(folio_test_hugetlb(folio)))
1910 hugetlb_remove_rmap(folio);
1912 folio_remove_rmap_pte(folio, subpage, vma);
1915 folio_put(folio);
1928 static int folio_not_mapped(struct folio *folio)
1930 return !folio_mapped(folio);
1934 * try_to_unmap - Try to remove all page table mappings to a folio.
1935 * @folio: The folio to unmap.
1939 * folio. It is the caller's responsibility to check if the folio is
1942 * Context: Caller must hold the folio lock.
1944 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
1954 rmap_walk_locked(folio, &rwc);
1956 rmap_walk(folio, &rwc);
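try_to_unmap() runs try_to_unmap_one() against every VMA found by rmap_walk(), or rmap_walk_locked() when TTU_RMAP_LOCKED is passed. It returns nothing, so the caller re-checks folio_mapped() afterwards, as the comment above says. A hedged reclaim-style sketch; flag selection and failure handling are simplified.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/huge_mm.h>

/* Sketch: try to detach all user mappings of a locked folio for reclaim. */
static bool example_unmap_for_reclaim(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (folio_mapped(folio)) {
		enum ttu_flags flags = TTU_BATCH_FLUSH;

		if (folio_test_pmd_mappable(folio))
			flags |= TTU_SPLIT_HUGE_PMD;

		try_to_unmap(folio, flags);
	}

	/* Success only if nothing kept or re-established a mapping. */
	return !folio_mapped(folio);
}
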
1965 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1969 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1992 split_huge_pmd_address(vma, address, true, folio);
2005 if (folio_test_hugetlb(folio)) {
2022 subpage = folio_page(folio,
2023 pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
2024 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
2025 !folio_test_pmd_mappable(folio), folio);
2037 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2041 if (folio_is_zone_device(folio)) {
2052 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
2053 subpage = &folio->page;
2055 subpage = folio_page(folio, pfn - folio_pfn(folio));
2058 anon_exclusive = folio_test_anon(folio) &&
2061 if (folio_test_hugetlb(folio)) {
2062 bool anon = folio_test_anon(folio);
2118 * a remote CPU could still be writing to the folio.
2132 /* Set the dirty flag on the folio now the pte is gone. */
2134 folio_mark_dirty(folio);
2139 if (folio_is_device_private(folio)) {
2140 unsigned long pfn = folio_pfn(folio);
2145 WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio,
2172 folio_order(folio));
2179 if (folio_test_hugetlb(folio)) {
2180 hugetlb_count_sub(folio_nr_pages(folio), mm);
2184 dec_mm_counter(mm, mm_counter(folio));
2199 dec_mm_counter(mm, mm_counter(folio));
2205 if (folio_test_hugetlb(folio))
2214 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2218 if (folio_test_hugetlb(folio)) {
2220 hugetlb_try_share_anon_rmap(folio)) {
2228 folio_try_share_anon_rmap_pte(folio, subpage)) {
2258 if (folio_test_hugetlb(folio))
2264 folio_order(folio));
2271 if (unlikely(folio_test_hugetlb(folio)))
2272 hugetlb_remove_rmap(folio);
2274 folio_remove_rmap_pte(folio, subpage, vma);
2277 folio_put(folio);
2287 * @folio: the folio to replace page table entries for
2290 * Tries to remove all the page table entries which are mapping this folio and
2291 * replace them with special swap entries. Caller must hold the folio lock.
2293 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2310 if (folio_is_zone_device(folio) &&
2311 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2322 if (!folio_test_ksm(folio) && folio_test_anon(folio))
2326 rmap_walk_locked(folio, &rwc);
2328 rmap_walk(folio, &rwc);
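try_to_migrate() is the same walk, but try_to_migrate_one() installs migration swap entries instead of dropping the mappings outright, so the PTEs can later be repointed at the destination folio by remove_migration_ptes(). A hedged sketch of the unmap half of migration; reference handling, the anon_vma pinning shown earlier and the copy/restore steps are omitted.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch: replace all PTEs mapping a locked source folio with migration entries. */
static bool example_freeze_mappings_for_migration(struct folio *src)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);

	if (folio_mapped(src))
		try_to_migrate(src, 0);

	/* All mappings must now be migration entries for the move to proceed. */
	return !folio_mapped(src);
}
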
2339 static bool page_make_device_exclusive_one(struct folio *folio,
2343 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2355 address + folio_size(folio)),
2361 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2370 subpage = folio_page(folio,
2371 pte_pfn(ptent) - folio_pfn(folio));
2378 /* Set the dirty flag on the folio now the pte is gone. */
2380 folio_mark_dirty(folio);
2413 folio_remove_rmap_pte(folio, subpage, vma);
2422 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2423 * @folio: The folio to replace page table entries for.
2424 * @mm: The mm_struct where the folio is expected to be mapped.
2425 * @address: Address where the folio is expected to be mapped.
2429 * folio and replace them with special device exclusive swap entries to
2430 * grant a device exclusive access to the folio.
2432 * Context: Caller must hold the folio lock.
2436 static bool folio_make_device_exclusive(struct folio *folio,
2456 if (!folio_test_anon(folio))
2459 rmap_walk(folio, &rwc);
2461 return args.valid && !folio_mapcount(folio);
2499 struct folio *folio = page_folio(pages[i]);
2500 if (PageTail(pages[i]) || !folio_trylock(folio)) {
2501 folio_put(folio);
2506 if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2507 folio_unlock(folio);
2508 folio_put(folio);
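The loop fragments at 2499-2513 belong to make_device_exclusive_range(), which locks each GUP'd page and tries folio_make_device_exclusive() on it. A hedged sketch of how a driver-style caller might use it for a single page; the owner cookie semantics and the assumption that unconverted entries come back as NULL are based on this kernel range, not spelled out in the listing.

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Sketch: ask the kernel to mark one user page exclusively owned by a
 * device. "owner" is an opaque cookie matched by the driver's MMU
 * notifier. The NULL check for a failed conversion is an assumption.
 */
static struct page *example_grab_exclusive_page(struct mm_struct *mm,
						unsigned long addr, void *owner)
{
	struct page *page = NULL;
	long npages;

	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					     &page, owner);
	if (npages != 1 || !page)
		return NULL;	/* not converted; nothing to release */

	return page;	/* caller holds the lock and a reference; drop both when done */
}
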
2527 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2533 return rwc->anon_lock(folio, rwc);
2541 anon_vma = folio_anon_vma(folio);
2562 * @folio: the folio to be handled
2566 * Find all the mappings of a folio using the mapping pointer and the vma
2569 static void rmap_walk_anon(struct folio *folio,
2577 anon_vma = folio_anon_vma(folio);
2579 VM_BUG_ON_FOLIO(!anon_vma, folio);
2581 anon_vma = rmap_walk_anon_lock(folio, rwc);
2586 pgoff_start = folio_pgoff(folio);
2587 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2591 unsigned long address = vma_address(&folio->page, vma);
2599 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2601 if (rwc->done && rwc->done(folio))
2611 * @folio: the folio to be handled
2615 * Find all the mappings of a folio using the mapping pointer and the vma chains
2618 static void rmap_walk_file(struct folio *folio,
2621 struct address_space *mapping = folio_mapping(folio);
2631 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2636 pgoff_start = folio_pgoff(folio);
2637 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2652 unsigned long address = vma_address(&folio->page, vma);
2660 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2662 if (rwc->done && rwc->done(folio))
2671 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2673 if (unlikely(folio_test_ksm(folio)))
2674 rmap_walk_ksm(folio, rwc);
2675 else if (folio_test_anon(folio))
2676 rmap_walk_anon(folio, rwc, false);
2678 rmap_walk_file(folio, rwc, false);
2682 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2685 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2686 if (folio_test_anon(folio))
2687 rmap_walk_anon(folio, rwc, true);
2689 rmap_walk_file(folio, rwc, true);
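Every higher-level operation above (folio_referenced(), folio_mkclean(), try_to_unmap(), try_to_migrate()) is built on the same pattern: fill in a struct rmap_walk_control and hand it to rmap_walk(), which dispatches to the KSM, anon or file walker. A hedged sketch of a minimal walker that just counts mappings; the callback and argument names are illustrative.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Illustrative callback: count how many VMAs currently map the folio. */
static bool example_count_one(struct folio *folio, struct vm_area_struct *vma,
			      unsigned long address, void *arg)
{
	int *count = arg;

	(*count)++;
	return true;	/* keep walking; returning false stops the walk */
}

/* Sketch: walk all mappings of a folio the caller has locked. */
static int example_count_mappings(struct folio *folio)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one,
		.arg = &count,
		.anon_lock = folio_lock_anon_vma_read,
	};

	rmap_walk(folio, &rwc);
	return count;
}
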
2698 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
2701 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
2702 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2704 atomic_inc(&folio->_entire_mapcount);
2706 SetPageAnonExclusive(&folio->page);
2707 VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
2708 PageAnonExclusive(&folio->page), folio);
2711 void hugetlb_add_new_anon_rmap(struct folio *folio,
2714 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
2718 atomic_set(&folio->_entire_mapcount, 0);
2719 folio_clear_hugetlb_restore_reserve(folio);
2720 __folio_set_anon(folio, vma, address, true);
2721 SetPageAnonExclusive(&folio->page);