Lines Matching defs:page

372 * page is divided by the number of processes sharing it.  So if a
380 * A shift of 12 before division means (assuming 4K page size):
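
The two fragments above are from the PSS (proportional set size) comment: each resident page's size is divided by its number of sharers, and the running total is kept left-shifted by PSS_SHIFT (12) so per-page rounding error stays small until the final report. A minimal userspace sketch of that fixed-point arithmetic, assuming 4K pages; only the shift value comes from the listing, the mapcounts and the loop are made up for illustration:

    #include <stdio.h>

    #define PSS_SHIFT 12            /* the "shift of 12 before division" */
    #define PAGE_SZ   4096ULL       /* assuming 4K pages */

    int main(void)
    {
        /* hypothetical share counts: two private pages, one shared by 3 */
        int mapcounts[] = { 1, 1, 3 };
        unsigned long long pss = 0; /* accumulated as bytes << PSS_SHIFT */

        for (int i = 0; i < 3; i++)
            pss += (PAGE_SZ << PSS_SHIFT) / mapcounts[i];

        /* shift back only when reporting; accumulating in fixed point
         * keeps the per-page truncation error tiny */
        printf("Pss: %llu kB\n", (pss >> PSS_SHIFT) >> 10);
        return 0;
    }
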
443 static void smaps_account(struct mem_size_stats *mss, struct page *page,
447 struct folio *folio = page_folio(page);
448 int i, nr = compound ? compound_nr(page) : 1;
453 * of the compound page.
472 * differ page-by-page.
474 * refcount == 1 guarantees the page is mapped exactly once.
475 * If any subpage of the compound page is mapped with a PTE it would elevate
479 * Without holding the page lock this snapshot can be slightly wrong as
481 * call page_mapcount() even with PTL held if the page is not mapped,
490 for (i = 0; i < nr; i++, page++) {
491 int mapcount = page_mapcount(page);
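
smaps_account() above first charges quantities that depend only on the size and type of the compound page, then walks every subpage because the mapcount, and therefore the shared/private split, can differ page by page. A rough sketch of that per-subpage split, with hypothetical names; the dirty/clean breakdown and the migration-entry special case mentioned in the comments are left out:

    /* Hypothetical restatement of the per-subpage loop: a subpage seen by
     * more than one process counts as shared and contributes size/mapcount
     * to PSS; otherwise it is private and contributes its full size. */
    struct split { unsigned long long shared, priv, pss; };

    static void account_subpages(struct split *s, const int *mapcount, int nr,
                                 unsigned long long page_size, int pss_shift)
    {
        for (int i = 0; i < nr; i++) {
            unsigned long long fp = page_size << pss_shift;

            if (mapcount[i] >= 2) {
                s->shared += page_size;
                s->pss += fp / mapcount[i];
            } else {
                s->priv += page_size;
                s->pss += fp;
            }
        }
    }
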
533 struct page *page = NULL;
538 page = vm_normal_page(vma, addr, ptent);
560 page = pfn_swap_entry_to_page(swpent);
567 if (!page)
570 smaps_account(mss, page, false, young, dirty, locked, migration);
580 struct page *page = NULL;
585 page = vm_normal_page_pmd(vma, addr, *pmd);
591 page = pfn_swap_entry_to_page(entry);
594 if (IS_ERR_OR_NULL(page))
596 folio = page_folio(page);
606 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
792 * object, so we have to distinguish them during the page walk.
1086 struct page *page;
1094 page = vm_normal_page(vma, addr, pte);
1095 if (!page)
1097 return page_maybe_dma_pinned(page);
1412 struct page *page = NULL;
1419 page = vm_normal_page(vma, addr, pte);
1447 page = pfn_swap_entry_to_page(entry);
1452 if (page && !PageAnon(page))
1454 if (page && !migration && page_mapcount(page) == 1)
1477 struct page *page = NULL;
1483 page = pmd_page(pmd);
1516 page = pfn_swap_entry_to_page(entry);
1520 if (page && !migration && page_mapcount(page) == 1)
1630 * For each page in the address space, this file contains one 64-bit entry
1633 * Bits 0-54 page frame number (PFN) if present
1637 * Bit 56 page exclusively mapped
1640 * Bit 61 page is file-page or shared-anon
1641 * Bit 62 page swapped
1642 * Bit 63 page present
1644 * If the page is not present but in swap, then the PFN contains an
1645 * encoding of the swap file number and the page's offset into the
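
The comment block above (source lines 1630-1645) is the user-facing contract for /proc/pid/pagemap: one 64-bit entry per virtual page, read at offset (vaddr / page_size) * 8. A small, hedged reader that decodes the bits named in that comment; the masks are derived from the listing rather than copied from a uapi header, and on recent kernels the PFN field reads as zero without CAP_SYS_ADMIN:

    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        uintptr_t vaddr = (uintptr_t)&argc;        /* any mapped address */
        long psize = sysconf(_SC_PAGESIZE);
        int fd = open("/proc/self/pagemap", O_RDONLY);
        uint64_t e;

        (void)argv;
        if (fd < 0)
            return 1;
        /* one 64-bit entry per virtual page number */
        if (pread(fd, &e, sizeof(e), (vaddr / psize) * sizeof(e)) != sizeof(e))
            return 1;

        printf("present            : %d\n", (int)((e >> 63) & 1));
        printf("swapped            : %d\n", (int)((e >> 62) & 1));
        printf("file or shared-anon: %d\n", (int)((e >> 61) & 1));
        printf("exclusively mapped : %d\n", (int)((e >> 56) & 1));
        printf("pfn (bits 0-54)    : 0x%" PRIx64 "\n", e & ((1ULL << 55) - 1));
        close(fd);
        return 0;
    }
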
1789 struct page *page;
1796 page = vm_normal_page(vma, addr, pte);
1797 if (page && !PageAnon(page))
1851 struct page *page;
1858 page = vm_normal_page_pmd(vma, addr, pmd);
1859 if (page && !PageAnon(page))
1910 * page cannot be swapped. So PAGE_IS_FILE is not checked for
2132 * Break huge page into small pages if the WP operation
2133 * needs to be performed on a portion of the huge page.
2295 /* Partial HugeTLB page WP isn't possible. */
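
The two comments above (source lines 2132-2133 and 2295) capture the asymmetry when write-protecting part of a huge mapping: a THP can be split into base pages so the operation applies only to the requested range, while a HugeTLB page cannot be split, so a partial request has to be refused. A hypothetical helper, not taken from the kernel, showing the "partial coverage" test both paths rely on:

    #include <stdbool.h>

    /* True if [start, end) covers only part of the huge page mapped at
     * haddr with size hsize. A THP caller would split the huge page first;
     * a HugeTLB caller has no such option and must fail the request. */
    static bool wp_is_partial_hugepage(unsigned long start, unsigned long end,
                                       unsigned long haddr, unsigned long hsize)
    {
        return start > haddr || end < haddr + hsize;
    }
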
2557 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2560 struct folio *folio = page_folio(page);
2561 int count = page_mapcount(page);
2585 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
2588 struct page *page;
2594 page = vm_normal_page(vma, addr, pte);
2595 if (!page || is_zone_device_page(page))
2598 if (PageReserved(page))
2601 nid = page_to_nid(page);
2605 return page;
2609 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
2613 struct page *page;
2619 page = vm_normal_page_pmd(vma, addr, pmd);
2620 if (!page)
2623 if (PageReserved(page))
2626 nid = page_to_nid(page);
2630 return page;
2646 struct page *page;
2648 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
2649 if (page)
2650 gather_stats(page, md, pmd_dirty(*pmd),
2663 struct page *page = can_gather_numa_stats(ptent, vma, addr);
2664 if (!page)
2666 gather_stats(page, md, pte_dirty(ptent), 1);
2679 struct page *page;
2684 page = pte_page(huge_pte);
2687 gather_stats(page, md, pte_dirty(huge_pte), 1);
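
The last group of hits (source lines 2557-2687) is the /proc/pid/numa_maps walk: can_gather_numa_stats() and its PMD variant resolve the entry to a page, skip non-normal, zone-device and reserved pages, and gather_stats() then charges the mapping to the page's node via page_to_nid(). A loose userspace-flavoured sketch of that gather step, with made-up names; only the overall flow follows the listing:

    #define MAX_NODES 64                    /* illustrative bound */

    struct numa_counts {
        unsigned long pages;                /* total pages charged       */
        unsigned long anon;                 /* anonymous pages           */
        unsigned long dirty;                /* dirty pages               */
        unsigned long node[MAX_NODES];      /* per-NUMA-node page counts */
    };

    /* Hypothetical mirror of gather_stats(): charge nr_pages of one mapping
     * to the node the page lives on, plus anon/dirty totals. */
    static void gather(struct numa_counts *md, int nid, int is_anon,
                       int is_dirty, unsigned long nr_pages)
    {
        md->pages += nr_pages;
        if (is_anon)
            md->anon += nr_pages;
        if (is_dirty)
            md->dirty += nr_pages;
        md->node[nid] += nr_pages;
    }
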