/linux-master/mm/
memory-failure.c
  Handles page cache pages in various states. The tricky part here is that we can
  access any page asynchronously with respect to other VM users. One of the handled
  cases actually shows up as a frequent (top 10) page state in tools/mm/page-types when
  running a real workload.
  Hits: the <linux/page-flags.h> include; __page_handle_poison() (returns 1 when the
  page is dissolved, if needed, and taken off the buddy list, 0 when it is dissolved
  but not taken off); page_handle_poison(); shake_page(); collect_procs_anon();
  collect_procs_file(); collect_procs_fsdax(); collect_procs();
  PageHWPoisonTakenOff(); SetPageHWPoisonTakenOff(); ClearPageHWPoisonTakenOff();
  HWPoisonHandlable(); __get_hwpoison_page(); __get_unpoison_page();
  try_to_split_thp_page(); struct raw_hwp_page; is_raw_hwpoison_page_in_hugepage();
  folio_set_hugetlb_hwpoison(); soft_offline_in_use_page(); …
memcontrol.c
  Hits: the header notes on native page reclaim and on lockless page tracking &
  accounting; the <linux/page-flags.h> include; the comment "This page will be
  uncharged in obj_cgroup_release()"; and page_cgroup_ino(), which returns the inode
  number of the memcg a page is charged to by looking up the closest online ancestor of
  that memory cgroup (0 if the page is not charged to any cgroup), and which is safe to
  call without holding a reference to the page.
  Other hits: __memcg_kmem_charge_page(), __memcg_kmem_uncharge_page(), a
  vm_normal_page() lookup and several struct page locals, …
madvise.c
  Hits: the <linux/page-isolation.h> include; comments noting that, as a side effect,
  the advice confuses idle-page tracking, that a pte holding a swap entry can simply be
  cleared from the page table rather than paying for a page allocation plus zeroing,
  and that page tables are populated (prefaulted) readable/writable; and the
  poison-injection path, which looks the target page up with get_user_pages_fast(),
  converts it with page_to_pfn(), and notes that when soft offlining hugepages the page
  is migrated first, …
ksm.c
  The header comments describe how KSM sorts pages by their contents (each such page is
  write-protected, so searching on its contents is safe), how a reverse mapping leads
  from a KSM page to the virtual addresses that map it, and how, even if it takes, say,
  10 attempts to find a page in the unstable tree, once it is found it is secured in
  the stable tree (when a new page is scanned, the stable tree is compared first). The
  stable-tree node carries @node (rb node of this ksm page in the stable tree), @hlist
  (hlist head of rmap_items using this ksm page) and @kpfn (page frame number of this
  ksm page).
  Hits: page_stable_node(), calc_checksum(), replace_page(), try_to_merge_one_page(),
  try_to_merge_with_ksm_page(), try_to_merge_two_pages(), stable_tree_search(),
  unstable_tree_search_insert(), cmp_and_merge_page(), should_skip_rmap_item(),
  scan_get_next_rmap_item(), collect_procs_ksm(), plus several struct page locals, …
kmemleak.c
  Hit: the per-node struct page scan. For each PFN it takes pfn_to_online_page(), skips
  the page when it is not online, belongs to a different zone, or has a page_count() of
  zero (only pages in use are scanned), and otherwise scans the struct page itself with
  scan_block(page, page + 1, NULL).
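  Reconstructed from those fragments, the loop looks roughly as follows; the enclosing
  node walk that supplies start_pfn, end_pfn and zone is assumed and not part of the
  hits:

	/* Per-node struct page scan, as described above (sketch). */
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;
		/* Only scan pages belonging to this zone. */
		if (page_zone(page) != zone)
			continue;
		/* Only scan the struct page if the page is in use. */
		if (page_count(page) == 0)
			continue;
		scan_block(page, page + 1, NULL);
	}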
khugepaged.c
  Hits: comments about what would have happened if the vma had been large enough during
  a page fault, and about registering the mm here without waiting for a page fault; the
  pte scan that resolves each entry with vm_normal_page(), bails out when the result is
  NULL or a zone-device page, takes page_folio() of it, and then checks whether the
  compound page has already been dealt with; plus struct page locals in the collapse
  paths, …
internal.h
  Hits: kernel-doc fragments for what looks like a pte-batching helper (@addr is the
  user virtual address the first page is mapped at, @pte is the page table entry for
  the first page; start_ptep must map any page of the folio, and max_nr must be at
  least one and limited by the caller so scanning cannot exceed a single page table);
  and set_page_refcounted(), which turns a non-refcounted page (->_refcount == 0) into
  a refcounted one and asserts it is not handed a tail page.
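  A sketch of that helper; the fragment shows only the first assertion, so the second
  check and the final set_page_count() are filled in from memory of the helper rather
  than from the hit:

	/* Turn a non-refcounted page (->_refcount == 0) into a refcounted one. */
	static inline void set_page_refcounted(struct page *page)
	{
		VM_BUG_ON_PAGE(PageTail(page), page);
		VM_BUG_ON_PAGE(page_ref_count(page), page);	/* assumed */
		set_page_count(page, 1);			/* assumed */
	}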
hwpoison-inject.c
  Hits: a struct page *p local; comments noting that the filter implies being unable to
  support non-LRU pages except free pages, that injection should only hit the targeted
  owner (or a free page), and that memory_failure() will redo the check reliably inside
  the page lock; and the call err = hwpoison_filter(&folio->page).
hugetlb_cgroup.c
  Hits: hugetlb_cgroup_move_parent(idx, h_cg, page), with a truncated comment about
  page references and testing for page active, and which uses page_folio(page) and
  compound_nr(page); and the walk of h->hugepage_activelist with list_for_each_entry()
  that moves each page to the parent cgroup via
  hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page).
hugetlb_vmemmap.c
  Hits: struct vmemmap_remap_walk, which walks the vmemmap page table (@reuse_page is
  the page reused for the tail vmemmap pages, @reuse_addr is the virtual address of
  that page, @flags modifies the behavior of the walk); vmemmap_split_pmd(pmd, head,
  start, ...); a comment about the vmemmap page associated with the first vmemmap page;
  free_vmemmap_page(); and the pte_page()/list walks over the vmemmap pages being freed
  or remapped.
hugetlb.c
  Hits: the <asm/page.h> include; the cma_pages_valid() check of &folio->page against
  the per-node hugetlb_cma area; comments on the lock that serializes faults on the
  same logical page (and is also used to synchronize page …), on returning the number
  of global page reservations that must be dropped, on adding the huge page range
  [f, t) to the reserve map (one page requires at most one entry), and on the reserve
  map region for a page whose huge page was already freed and removed from the page
  cache.
  Hit functions: isolate_or_dissolve_huge_page(), make_huge_pte(), unmap_ref_private(),
  plus several struct page locals, …
huge_memory.c
  Hits: comments noting that defrag is invoked by khugepaged hugepage allocations and by
  page faults, that khugepaged doesn't collapse DAX vmas but page faults there are fine,
  that the check is skipped for page faults because the huge-fault path does it itself,
  that page faults are allowed even though anon_vma may not be initialized until the
  first page fault, and that the zero page can only be freed when the last reference
  remains.
  Hit functions: __do_huge_pmd_anonymous_page() (which clear_huge_page()s the new page),
  __split_huge_page(), split_huge_page_to_list_to_order(), set_pmd_migration_entry(),
  plus several struct page locals, …
gup.c
  Hits: sanity_check_pinned_pages(), whose comments explain that pinned pages must stick
  around until the page is freed and that for an anonymous THP either the given page
  (PTE-mapped THP) or the head page (PMD-mapped THP) should be PageAnonExclusive(), with
  the zero page exempted and a VM_BUG_ON_PAGE() otherwise.
  Hit functions: try_get_folio(), try_grab_folio(), try_grab_page(), unpin_user_page(),
  unpin_user_page_range_dirty_lock(), record_subpages(), can_follow_write_pmd(),
  can_follow_write_pte(), get_gate_page(), plus many struct page locals in the follow
  and fast-GUP paths, …
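  As a rough illustration of how the pin/unpin helpers named above are used, here is a
  hypothetical caller; it is not taken from gup.c, and the function name, buffer sizing
  and flag choice are assumptions:

	/* Hypothetical long-term pin of a user buffer using the helpers above. */
	static int pin_user_buffer(unsigned long uaddr, struct page **pages, int nr)
	{
		int pinned;

		pinned = pin_user_pages_fast(uaddr, nr,
					     FOLL_WRITE | FOLL_LONGTERM, pages);
		if (pinned < 0)
			return pinned;		/* fault or other error */
		if (pinned != nr) {
			/* Partial pin: release what we got and report failure. */
			unpin_user_pages(pages, pinned);
			return -EFAULT;
		}
		return 0;
	}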
folio-compat.c
  Legacy struct page wrappers that forward to the folio API: page_mapping(),
  unlock_page(), end_page_writeback(), wait_on_page_writeback(), wait_for_stable_page(),
  mark_page_accessed(), set_page_writeback(), set_page_dirty(),
  clear_page_dirty_for_io(), redirty_page_for_writepage(), add_to_page_cache_lru(),
  isolate_lru_page(), putback_lru_page(), … Each converts its page with page_folio()
  and calls the corresponding folio helper.
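  The pattern, cleaned up from the first few fragments:

	/* Compat wrappers: convert the page to its folio and forward the call. */
	struct address_space *page_mapping(struct page *page)
	{
		return folio_mapping(page_folio(page));
	}

	void unlock_page(struct page *page)
	{
		return folio_unlock(page_folio(page));
	}

	void end_page_writeback(struct page *page)
	{
		return folio_end_writeback(page_folio(page));
	}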
filemap.c
  Hits: the historical note about finishing the unification of the page and buffer
  caches and SMP-threading the page cache (21.05.1999, Ingo Molnar <mingo@redhat.com>);
  a comment that page->index is left set because truncation lookup relies upon it; the
  "BUG: Bad page cache in process %s pfn:%05lx" report, which dump_page()s a folio
  still mapped when deleted and then, on the good bet that the page is actually
  unmapped and another bad-page check will catch it later, calls page_mapcount_reset();
  notes that hugetlb folios do not participate in page cache accounting and that
  dirtying can occur when a driver which did get_user_pages() sets the page dirty; plus
  several struct page locals, …
debug_vm_pgtable.c
  A kernel test that validates architecture page table helpers. Hits: comments noting
  that on s390 the lower 4 bits are used to identify a given page table type, and that
  pxx_clear() is involved because of how dynamic page table folding works on s390; that
  one test needs to be executed after the given page table entry has been modified; and
  that ARM64 sets PG_arch_1 for the page, the flag isn't cleared when the page is
  released, and the page allocation check will then fail when the page is reused; plus
  the struct page locals used by the individual pte/pmd/pud tests.
debug_page_alloc.c
  Hits: the <linux/page-isolation.h> include and the guard-page helpers.
  __set_page_guard(zone, page, order) sets PageGuard, initializes page->buddy_list and
  stores the order in page_private(); __clear_page_guard(zone, page, order) clears
  PageGuard and resets the private field.
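  Cleaned up from the fragments, the pair looks roughly like this; the early
  debug_guardpage_minorder() gate that makes __set_page_guard() return false, and the 0
  passed to the final set_page_private(), are filled in as assumptions since the hits
  truncate them:

	bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order)
	{
		if (order >= debug_guardpage_minorder())	/* assumed gate */
			return false;

		__SetPageGuard(page);
		INIT_LIST_HEAD(&page->buddy_list);
		set_page_private(page, order);

		return true;
	}

	void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order)
	{
		__ClearPageGuard(page);
		set_page_private(page, 0);	/* assumed value */
	}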
debug.c
  Hits: __dump_folio(folio, page, ...), which reads the page's _mapcount, prints the
  "page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx" line, prints
  "page_type: %pGt" when page_has_type() says so, and hex-dumps sizeof(struct page)
  bytes of the page (and twice that for the folio); plus __dump_page(), dump_page(page,
  reason) and page_init_poison(page, size).
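  A hypothetical call site, just to show how the entry point is used (the condition and
  message are made up for illustration):

	if (unlikely(page_count(page) < 0))
		dump_page(page, "negative page refcount");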
compaction.c
  Compaction heavily depends upon page migration to do all the real heavy lifting.
  Hits: the <linux/page-isolation.h> include; the free-page splitting loop, which walks
  freepages[order] with list_for_each_entry_safe(), removes each page from the list,
  runs post_alloc_hook(page, order, __GFP_MOVABLE), split_page()s it and re-adds the
  pieces to a temporary list; the movable-page helpers PageMovable(), __SetPageMovable()
  and __ClearPageMovable(); the pageblock-skip machinery isolation_suitable(),
  pageblock_skip_persistent(), test_and_set_skip() and update_pageblock_skip();
  suitable_migration_source() and suitable_migration_target(); plus the struct page
  locals of the isolation scanners, …
cma.c
  Hit: cma_alloc(cma, count, ...). Its struct page *page local starts out NULL, the
  early failure checks return it as-is, and on success it is set with pfn_to_page(pfn).
  A truncated comment notes that CMA can allocate multiple page blocks, which results
  in different … for those page blocks.
Makefile
  Page-related build rules: maccess.o, page-writeback.o and folio-compat.o in the core
  object list, plus the page-alloc composite object:
    page-alloc-y := page_alloc.o
    page-alloc-$(CONFIG_SHUFFLE_PAGE_ALLOCATOR) += shuffle.o
    obj-y += page-alloc.o
/linux-master/mm/kfence/
core.c
  Hits: comments noting that a KFENCE metadata object only ever maps to 1 page (and the
  stored address is verified against that), that the page is unprotected when it is
  being reused, that a racy use-after-free or an incorrect reallocation of the page by
  KFENCE is detected, that page protection is restored if there was an OOB access, and
  that memory is cleared on free when init-on-free is set: even while the page is
  protected the data is still there, and once the page is unprotected again it would
  still be accessible. Also hit: the struct page *pages pool and the comment about
  protecting the first 2 pages, where the first page is mostly unnecessary and merely
  serves as an extended guard page, although adding one additional page …
/linux-master/mm/kasan/
hw_tags.c
  Hits: the kasan.page_alloc.sample.order=<minimum page order> boot parameter; the loop
  that applies page_kasan_tag_set(page, tag) to each page of an area; the
  vmalloc_to_page() / clear_highpage_kasan_tagged() path; and comments about explicitly
  poisoning and initializing the in-page vmalloc() redzone and setting per-page tag
  flags to allow accessing physical memory for the …
/linux-master/lib/
test_hmm.c
  For device_private pages, dpage is just a dummy struct page: the test allocates a real
  system memory page as backing storage to fake a real device, and zone_device_data
  points to that backing page. For device_coherent memory the struct page represents
  real memory and is used directly; BACKING_PAGE() hides the distinction.
  Hits: BACKING_PAGE(), the struct page *free_pages list, dmirror_page_to_chunk(),
  dmirror_page_to_device(), dmirror_devmem_free(), plus struct page locals in the
  migrate and fault paths, …
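  The macro itself, cleaned up from the fragment:

	/* Return the real backing page for device_private pages, the page itself
	 * (device_coherent memory) otherwise. */
	#define BACKING_PAGE(page) (is_device_private_page((page)) ? \
				    (page)->zone_device_data : (page))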
buildid.c
  Parses the ELF build ID out of a file-backed mapping. Hits: comments noting that only
  a note that fits in the first page is supported, that only phdrs that fit in one page
  are supported, and that this only works for page-backed storage; the lookup
  find_get_page(vma->vm_file->f_mapping, 0), which yields -EFAULT when the page is not
  mapped; and the kmap_local_page(page) used to read it.
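  The access pattern those hits describe, as a sketch; the kunmap_local()/put_page()
  cleanup is assumed rather than shown in the hits, and error handling is abbreviated:

	/* Read the first page of the mapped file through the page cache
	 * (only works for page-backed storage). */
	struct page *page;
	void *page_addr;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;		/* page not mapped */

	page_addr = kmap_local_page(page);
	/* ... parse the ELF header / build ID note that fits in this first page ... */
	kunmap_local(page_addr);
	put_page(page);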