Lines Matching refs:page

76  * by their contents.  Because each such page is write-protected, searching on
81 * mapping from a KSM page to virtual addresses that map this page.
90 * different KSM page copy of that content
114 * take 10 attempts to find a page in the unstable tree; once it is found,
115 * it is secured in the stable tree. (When we scan a new page, we first
150 * @node: rb node of this ksm page in the stable tree
154 * @hlist: hlist head of rmap_items using this ksm page
155 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
192 * @nid: NUMA node id of unstable tree in which linked (may not match page)
195 * @oldchecksum: previous checksum of the page at that virtual address
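
The field descriptions above come from the two core KSM structures: the stable-tree node that represents one shared KSM page, and the rmap_item that tracks one mergeable virtual address. A minimal stand-alone sketch of just the fields mentioned here, with stub types in place of the kernel's rb_node and hlist definitions (the stub names are mine, not the kernel's):

/* Stub types standing in for the kernel's struct rb_node / hlist heads. */
struct rb_stub { struct rb_stub *left, *right; };
struct hlist_stub { void *first; };

/* One node of the stable tree: represents one shared ksm page. */
struct stable_node_sketch {
	struct rb_stub node;		/* rb node of this ksm page in the stable tree */
	struct hlist_stub hlist;	/* rmap_items that map this ksm page */
	unsigned long kpfn;		/* page frame number of this ksm page */
};

/* One reverse-mapping item: tracks a single mergeable virtual address. */
struct rmap_item_sketch {
	unsigned long address;		/* the virtual address being tracked */
	int nid;			/* NUMA node id of the unstable tree it is linked in */
	unsigned int oldchecksum;	/* previous checksum of the page at that address */
};
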
261 /* The number of page slots additionally sharing those nodes */
279 /* Maximum number of page slots sharing a stable node */
288 /* Checksum of an empty (zeroed) page */
604 * page tables after it has passed through ksm_exit() - which, if necessary,
618 struct page *page = NULL;
629 page = vm_normal_page(walk->vma, addr, ptent);
638 page = pfn_swap_entry_to_page(entry);
640 /* return 1 if the page is a normal ksm page or KSM-placed zero page */
641 ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
657 * We use break_ksm to break COW on a ksm page by triggering unsharing,
658 * such that the ksm page will get replaced by an exclusive anonymous page.
660 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
662 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
689 * We must loop until we no longer find a KSM page because
709 * will retry to break_cow on each pass, so should recover the page
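
The retry loop described above can be pictured with a small stand-alone toy model; the "slot" below is a hypothetical stand-in for one pte, and unshare_fault_sketch() stands in for the write/unshare fault that the real break_ksm() triggers:

#include <stdbool.h>

/* Toy model of one mapped slot that may currently point at a shared ksm page. */
struct slot_sketch {
	bool maps_ksm_page;
	int faults_taken;
};

/* Stand-in for faulting the address writable/unshared: the slot gets its own copy. */
static int unshare_fault_sketch(struct slot_sketch *s)
{
	s->faults_taken++;
	s->maps_ksm_page = false;	/* COW broken: now an exclusive anonymous page */
	return 0;			/* a real fault can also fail (e.g. OOM) */
}

/* Keep faulting until no ksm page is found at the address, as the comments describe. */
static int break_ksm_sketch(struct slot_sketch *s)
{
	for (;;) {
		if (!s->maps_ksm_page)
			return 0;
		if (unshare_fault_sketch(s))
			return -1;
	}
}
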
769 static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
774 struct page *page;
781 page = follow_page(vma, addr, FOLL_GET);
782 if (IS_ERR_OR_NULL(page))
784 if (is_zone_device_page(page))
786 if (PageAnon(page)) {
787 flush_anon_page(vma, page, addr);
788 flush_dcache_page(page);
791 put_page(page);
793 page = NULL;
796 return page;
900 * ksm_get_folio: checks if the page indicated by the stable node
901 * is still its ksm page, despite having held no reference to it.
902 * In which case we can trust the content of the page, and it
903 * returns that page; but if the page has now been zapped,
905 * But beware, the stable node's page might be being migrated.
907 * You would expect the stable_node to hold a reference to the ksm page.
908 * But if it increments the page's count, swapping out has to wait for
909 * ksmd to come around again before it can free the page, which may take
911 * "keyhole reference": access to the ksm page from the stable node peeps
912 * out through its keyhole to see if that page still holds the right key,
914 * page to reset its page->mapping to NULL, and relies on no other use of
915 * a page to put something that might look like our key in page->mapping.
934 * We cannot do anything with the page while its refcount is 0.
935 * Usually 0 means free, or tail of a higher-order page: in which
937 * however, it might mean that the page is under page_ref_freeze().
939 * the same holds in the reuse_ksm_page() case; but if the page is swapcache
940 * in folio_migrate_mapping(), it might still be our page,
945 * Another check for page->mapping != expected_mapping would
947 * optimize the common case, when the page is or is about to
981 * We come here from above when page->mapping or !PageSwapCache
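
A rough stand-alone sketch of the "keyhole reference" check described above, with a stub folio type of my own; expected_mapping is what folio->mapping would still contain if the folio belonged to this stable node. The real code additionally handles freezing, swapcache and migration, which this omits:

struct folio_stub {
	void *mapping;		/* stand-in for folio->mapping */
	int refcount;		/* stand-in for the folio reference count */
};

static struct folio_stub *ksm_get_folio_sketch(struct folio_stub *folio,
					       void *expected_mapping)
{
	if (!folio || folio->mapping != expected_mapping)
		return NULL;		/* zapped, or reused for something else: stale node */
	if (folio->refcount == 0)
		return NULL;		/* free, frozen or being migrated: hands off */
	folio->refcount++;		/* take our own reference... */
	if (folio->mapping != expected_mapping) {
		folio->refcount--;	/* ...then peep through the keyhole again */
		return NULL;
	}
	return folio;
}
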
1061 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
1092 static inline struct ksm_stable_node *page_stable_node(struct page *page)
1094 return folio_stable_node(page_folio(page));
1100 VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
1269 static u32 calc_checksum(struct page *page)
1272 void *addr = kmap_local_page(page);
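
calc_checksum() only needs a fast, stable fingerprint to tell whether a page's contents are still changing between scans; the exact hash is not important. A stand-alone userspace stand-in over a 4 KiB buffer (FNV-1a here purely as an illustration, not the hash the kernel uses):

#include <stdint.h>
#include <stddef.h>

static uint32_t calc_checksum_sketch(const unsigned char *page, size_t len)
{
	uint32_t h = 2166136261u;		/* FNV-1a offset basis */

	for (size_t i = 0; i < len; i++) {
		h ^= page[i];
		h *= 16777619u;			/* FNV-1a prime */
	}
	return h;
}
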
1292 pvmw.address = page_address_in_vma(&folio->page, vma);
1305 anon_exclusive = PageAnonExclusive(&folio->page);
1320 * No need to notify as we are downgrading page table to read
1321 * only not changing it to point to a new page.
1328 * page
1337 folio_try_share_anon_rmap_pte(folio, &folio->page)) {
1363 * replace_page - replace page in vma by new ksm page
1364 * @vma: vma that holds the pte pointing to page
1365 * @page: the page we are replacing by kpage
1366 * @kpage: the ksm page we replace page by
1371 static int replace_page(struct vm_area_struct *vma, struct page *page,
1372 struct page *kpage, pte_t orig_pte)
1386 addr = page_address_in_vma(page, vma);
1413 VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
1427 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
1429 * the dirty bit in zero page's PTE is set.
1435 * We're replacing an anonymous page with a zero page, which is
1445 * No need to notify as we are replacing a read only page with another
1446 * read only page with the same content.
1453 folio = page_folio(page);
1454 folio_remove_rmap_pte(folio, page, vma);
1469 * @vma: the vma that holds the pte pointing to page
1470 * @page: the PageAnon page that we want to replace with kpage
1471 * @kpage: the PageKsm page that we want to map instead of page,
1472 * or NULL the first time when we want to use page as kpage.
1477 struct page *page, struct page *kpage)
1482 if (page == kpage) /* ksm page forked */
1485 if (!PageAnon(page))
1489 * We need the page lock to read a stable PageSwapCache in
1493 * then come back to this page when it is unlocked.
1495 if (!trylock_page(page))
1498 if (PageTransCompound(page)) {
1499 if (split_huge_page(page))
1504 * If this anonymous page is mapped only here, its pte may need
1509 if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) {
1512 * While we hold page lock, upgrade page from
1516 folio_set_stable_node(page_folio(page), NULL);
1517 mark_page_accessed(page);
1519 * Page reclaim just frees a clean page with no dirty
1520 * ptes: make sure that the ksm page would be swapped.
1522 if (!PageDirty(page))
1523 SetPageDirty(page);
1525 } else if (pages_identical(page, kpage))
1526 err = replace_page(vma, page, kpage, orig_pte);
1530 unlock_page(page);
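
The sequence visible above write-protects the candidate page first and only then compares it, so that a racing write cannot change the page after it has been found identical. A stand-alone toy sketch of that ordering (the page lock, pte re-checks and rmap work of the real function are left out, and the types are my own):

#include <stdbool.h>
#include <string.h>

#define PAGE_SZ_SKETCH 4096

struct toy_page {
	unsigned char data[PAGE_SZ_SKETCH];
	bool write_protected;
	bool is_ksm;
};

static int try_to_merge_one_page_sketch(struct toy_page *page, struct toy_page *kpage)
{
	page->write_protected = true;		/* freeze the contents before comparing */

	if (!kpage) {				/* first of a pair: page itself becomes the ksm page */
		page->is_ksm = true;
		return 0;
	}
	if (memcmp(page->data, kpage->data, PAGE_SZ_SKETCH) != 0)
		return -1;			/* contents differ after all: cannot merge */

	/* stand-in for replace_page(): this mapping now points at kpage, read-only */
	return 0;
}
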
1537 * but no new kernel page is allocated: kpage must already be a ksm page.
1542 struct page *page, struct page *kpage)
1553 err = try_to_merge_one_page(vma, page, kpage);
1565 trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
1572 * to be merged into one page.
1575 * pages into one ksm page, NULL otherwise.
1577 * Note that this function upgrades page to ksm page: if one of the pages
1578 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1580 static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
1581 struct page *page,
1583 struct page *tree_page)
1587 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1590 tree_page, page);
1592 * If that fails, we have a ksm page with only one pte
1598 return err ? NULL : page;
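
Using the toy types from the previous sketch, the two-step merge described above looks roughly like this: first turn one page into a ksm page, then merge the second into it; if the second step fails, the half-built ksm page with only one pte pointing to it is broken back out (break_cow() in the real code):

/* Reuses struct toy_page and try_to_merge_one_page_sketch() from the sketch above. */
static struct toy_page *try_to_merge_two_pages_sketch(struct toy_page *page,
						      struct toy_page *tree_page)
{
	int err;

	err = try_to_merge_one_page_sketch(page, NULL);	/* page becomes a ksm page */
	if (!err) {
		err = try_to_merge_one_page_sketch(tree_page, page);
		if (err)
			page->is_ksm = false;	/* stand-in for break_cow(): only one pte left */
	}
	return err ? NULL : page;
}
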
1810 * stable_tree_search - search for page inside the stable tree
1812 * This function checks if there is a page inside the stable tree
1813 * with identical content to the page that we are scanning right now.
1818 static struct page *stable_tree_search(struct page *page)
1828 folio = page_folio(page);
1831 /* ksm page forked */
1833 return &folio->page;
1875 * Take any of the stable_node dups page of
1900 ret = memcmp_pages(page, &tree_folio->page);
1929 * page in any of the existing
1932 * scanned page to find itself a match
1934 * brand new KSM page to add later to
1941 * Lock and unlock the stable_node's page (which
1942 * might already have been migrated) so that page
1966 return &tree_folio->page;
1980 return &folio->page;
2030 return &folio->page;
2055 * of the current nid for this page
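
Stripped of the migration, NUMA and stable_node dup handling visible in the fragments above, the stable-tree search itself is an ordinary binary-tree descent keyed on a memcmp of page contents. A sketch reusing struct toy_page and PAGE_SZ_SKETCH from the earlier sketch:

#include <string.h>

struct stable_tree_node_sketch {
	struct toy_page *kpage;				/* the ksm page this node represents */
	struct stable_tree_node_sketch *left, *right;
};

static struct toy_page *stable_tree_search_sketch(struct stable_tree_node_sketch *root,
						  struct toy_page *page)
{
	while (root) {
		int ret = memcmp(page->data, root->kpage->data, PAGE_SZ_SKETCH);

		if (ret < 0)
			root = root->left;
		else if (ret > 0)
			root = root->right;
		else
			return root->kpage;	/* identical content already in the stable tree */
	}
	return NULL;				/* nothing identical: caller tries the unstable tree */
}
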
2067 * stable_tree_insert - insert stable tree node pointing to new ksm page
2111 * Take any of the stable_node dups page of
2136 ret = memcmp_pages(&kfolio->page, &tree_folio->page);
2179 * unstable_tree_search_insert - search for identical page,
2182 * This function searches for a page in the unstable tree identical to the
2183 * page currently being scanned; and if no identical page is found in the
2187 * to the currently scanned page, NULL otherwise.
2194 struct page *page,
2195 struct page **tree_pagep)
2202 nid = get_kpfn_nid(page_to_pfn(page));
2208 struct page *tree_page;
2218 * Don't substitute a ksm page for a forked page.
2220 if (page == tree_page) {
2225 ret = memcmp_pages(page, tree_page);
2262 * the same ksm page.
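
The unstable tree is searched the same way, except that a page with no identical partner is inserted so that a later scan can find it. A sketch building on the toy types above (the node type and its management are mine):

#include <stdlib.h>
#include <string.h>

struct unstable_node_sketch {
	struct toy_page *page;
	struct unstable_node_sketch *left, *right;
};

/* Returns an identical page already in the tree, or inserts this one and returns NULL. */
static struct toy_page *unstable_tree_search_insert_sketch(struct unstable_node_sketch **link,
							   struct toy_page *page)
{
	while (*link) {
		struct toy_page *tree_page = (*link)->page;
		int ret;

		if (tree_page == page)		/* don't substitute a ksm page for a forked page */
			return NULL;
		ret = memcmp(page->data, tree_page->data, PAGE_SZ_SKETCH);
		if (ret < 0)
			link = &(*link)->left;
		else if (ret > 0)
			link = &(*link)->right;
		else
			return tree_page;	/* found a mergeable partner */
	}

	*link = calloc(1, sizeof(**link));
	if (*link)
		(*link)->page = page;		/* remember this page for a later scan */
	return NULL;
}
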
2299 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2300 * if not, compare checksum to previous and if it's the same, see if page can
2301 * be inserted into the unstable tree, or merged with a page already there and
2304 * @page: the page that we are searching identical page to.
2305 * @rmap_item: the reverse mapping into the virtual address of this page
2307 static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
2311 struct page *tree_page = NULL;
2313 struct page *kpage;
2318 stable_node = page_stable_node(page);
2338 /* We first start with searching the page inside the stable tree */
2339 kpage = stable_tree_search(page);
2340 if (kpage == page && rmap_item->head == stable_node) {
2351 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2354 * The page was successfully merged:
2367 * If the hash value of the page has changed from the last time
2368 * we calculated it, this page is changing frequently: therefore we
2372 checksum = calc_checksum(page);
2379 * Same checksum as an empty page. We attempt to merge it with the
2380 * appropriate zero page if the user enabled this via sysfs.
2388 err = try_to_merge_one_page(vma, page,
2402 * In case of failure, the page was not really empty, so we
2409 unstable_tree_search_insert(rmap_item, page, &tree_page);
2413 kpage = try_to_merge_two_pages(rmap_item, page,
2417 * page, then we actually ended up increasing the reference
2418 * count of the same compound page twice, and split_huge_page
2425 split = PageTransCompound(page)
2426 && compound_head(page) == compound_head(tree_page);
2444 * If we fail to insert the page into the stable tree,
2446 * to a ksm page left outside the stable tree,
2457 * compound page. We will split the page now, but no
2460 * the page is locked, it is better to skip it and
2463 if (!trylock_page(page))
2465 split_huge_page(page);
2466 unlock_page(page);
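
Putting the pieces together, the decision flow documented above (stable tree first, then the checksum stability test, then the unstable tree) can be sketched with the helpers from the previous sketches; the zero-page, NUMA and huge-page details shown in the fragments above are left out:

static void cmp_and_merge_page_sketch(struct stable_tree_node_sketch *stable_root,
				      struct unstable_node_sketch **unstable_root,
				      struct toy_page *page,
				      struct rmap_item_sketch *rmap_item)
{
	struct toy_page *kpage, *tree_page;
	unsigned int checksum;

	/* 1. Is identical content already in the stable tree? Then just merge into it. */
	kpage = stable_tree_search_sketch(stable_root, page);
	if (kpage) {
		try_to_merge_one_page_sketch(page, kpage);
		return;
	}

	/* 2. Skip pages whose contents are still changing between scans. */
	checksum = calc_checksum_sketch(page->data, PAGE_SZ_SKETCH);
	if (checksum != rmap_item->oldchecksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	/* 3. Look for a partner in the unstable tree; on success the pair becomes a ksm page. */
	tree_page = unstable_tree_search_insert_sketch(unstable_root, page);
	if (tree_page)
		try_to_merge_two_pages_sketch(page, tree_page);
	/* The real code then inserts the new ksm page into the stable tree. */
}
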
2501 * Calculate skip age for the ksm page age. The age determines how often
2503 * smaller, the scanning of this page is skipped for fewer scans.
2505 * @age: rmap_item age of page
2520 * Determines if a page should be skipped for the current scan.
2522 * @page: page to check
2523 * @rmap_item: associated rmap_item of page
2525 static bool should_skip_rmap_item(struct page *page,
2538 if (PageKsm(page))
2561 /* Skip this page */
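
A stand-alone sketch of the skip logic described above; the age thresholds and skip counts below are made-up placeholders rather than the kernel's actual values, and the state struct is mine:

#include <stdbool.h>

/* Hypothetical thresholds: the longer merging has failed, the more scans we skip. */
static unsigned int skip_age_sketch(unsigned int age)
{
	if (age <= 2)
		return 1;		/* young item: keep scanning it every cycle */
	if (age <= 4)
		return 2;
	return 4;			/* repeatedly unmergeable: look at it rarely */
}

struct skip_state_sketch {
	unsigned int age;		/* how often merging has already been tried unsuccessfully */
	unsigned int remaining_skips;	/* scans left to skip before trying again */
};

static bool should_skip_sketch(bool page_is_ksm, struct skip_state_sketch *s)
{
	if (page_is_ksm)
		return false;		/* already merged: nothing to skip */
	if (s->remaining_skips) {
		s->remaining_skips--;
		return true;		/* skip this page for this scan cycle */
	}
	s->age++;
	s->remaining_skips = skip_age_sketch(s->age) - 1;
	return false;			/* scan it now, then skip for a while */
}
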
2568 static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
2588 * LRU cache, raised page count preventing write_protect_page
2657 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
2658 if (IS_ERR_OR_NULL(*page)) {
2663 if (is_zone_device_page(*page))
2665 if (PageAnon(*page)) {
2666 flush_anon_page(vma, *page, ksm_scan.address);
2667 flush_dcache_page(*page);
2674 if (should_skip_rmap_item(*page, rmap_item))
2679 put_page(*page);
2684 put_page(*page);
2755 struct page *page;
2760 rmap_item = scan_get_next_rmap_item(&page);
2763 cmp_and_merge_page(page, rmap_item);
2764 put_page(page);
3061 struct page *page = folio_page(folio, 0);
3078 if (PageHWPoison(page))
3090 if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
3116 * Rely on the page lock to protect against concurrent modifications
3117 * to that page's node of the stable tree.
3179 * Collect processes when the error hits a ksm page.
3181 void collect_procs_ksm(struct folio *folio, struct page *page,
3210 add_to_kill_ksm(t, page, vma, to_kill,
3264 * Don't ksm_get_folio, page has already gone:
3265 * which is why we keep kpfn instead of page*
3349 * Most of the work is done by page migration; but there might
3353 * non-existent struct page.
3588 * When a KSM page is created it is shared by 2 mappings. This
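
From userspace, all of the machinery listed here is driven simply by marking memory mergeable; the sharing described above (one KSM page backing two or more mappings) then shows up in counters such as /sys/kernel/mm/ksm/pages_sharing. A small runnable example (requires CONFIG_KSM and /sys/kernel/mm/ksm/run set to 1; the 10-second sleep is an arbitrary wait for ksmd to come around):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	size_t len = 256 * (size_t)psz;		/* 256 identical pages once memset below */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0x5a, len);

	if (madvise(buf, len, MADV_MERGEABLE)) {
		perror("madvise(MADV_MERGEABLE)");
		return 1;
	}
	sleep(10);	/* give ksmd time to scan and merge */

	/* Inspect /sys/kernel/mm/ksm/pages_shared and pages_sharing to see the effect. */
	return 0;
}
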
3917 /* The correct value depends on page size and endianness */