/linux-master/mm/
page_vma_mapped.c
     10  static inline bool not_found(struct page_vma_mapped_walk *pvmw)
     12          page_vma_mapped_walk_done(pvmw);
     16  static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
     20          if (pvmw->flags & PVMW_SYNC) {
     22                  pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
     23                                                  pvmw->address, &pvmw->ptl);
     24                  *ptlp = pvmw->ptl;
     96  check_pte(struct page_vma_mapped_walk *pvmw)
    133  check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
    142  step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
    173  page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
    328          struct page_vma_mapped_walk pvmw = {
    [all...]
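These hits are the walker's internals; every caller in the files below drives it with
the same loop. A minimal sketch of that calling convention, assuming only the API
visible in these results (the function name example_visit_folio is illustrative, not
from the hits):

    #include <linux/rmap.h>

    /*
     * Visit every place @folio is mapped in @vma, starting at @address.
     * Each successful iteration returns with pvmw.ptl held; leaving the
     * loop early must go through page_vma_mapped_walk_done() to drop it.
     */
    static void example_visit_folio(struct folio *folio,
                                    struct vm_area_struct *vma,
                                    unsigned long address)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            /* mapped by a pte: pvmw.pte and pvmw.address are valid */
                    } else {
                            /* pmd-mapped THP: inspect pvmw.pmd instead */
                    }
            }
    }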
rmap.c
    839          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
    843          while (page_vma_mapped_walk(&pvmw)) {
    844                  address = pvmw.address;
    847                  if (!folio_test_large(folio) || !pvmw.pte) {
    850                          page_vma_mapped_walk_done(&pvmw);
    856                   * be handled after the pvmw loop.
    870                  if (pvmw.pte) {
    872                          pte_young(ptep_get(pvmw.pte))) {
    873                                  lru_gen_look_around(&pvmw);
    878                          pvmw ...
    999  page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
   1120          struct page_vma_mapped_walk pvmw = {
    [all...]
page_idle.c
     56          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
     59          while (page_vma_mapped_walk(&pvmw)) {
     60                  addr = pvmw.address;
     61                  if (pvmw.pte) {
     66                          if (ptep_clear_young_notify(vma, addr, pvmw.pte))
     69                          if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
migrate.c
    186          DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
    188          while (page_vma_mapped_walk(&pvmw)) {
    198                  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
    203                  if (!pvmw.pte) {
    206                          remove_migration_pmd(&pvmw, new);
    213                  old_pte = ptep_get(pvmw.pte);
    255                          hugetlb_add_anon_rmap(folio, vma, pvmw.address,
    259                          set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, ...
    [all...]
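A note on the flags at 186, hedged against the PVMW_* definitions in
include/linux/rmap.h:

    /*
     * PVMW_SYNC      - wait for a contended pte lock instead of skipping
     *                  the page table (avoids racy checks);
     * PVMW_MIGRATION - match migration swap entries rather than present
     *                  ptes, which is what a walk over a folio already
     *                  unmapped for migration needs to find.
     */
    DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);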
ksm.c
   1282          DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
   1289          pvmw.address = page_address_in_vma(page, vma);
   1290          if (pvmw.address == -EFAULT)
   1295          mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
   1296                                  pvmw.address + PAGE_SIZE);
   1299          if (!page_vma_mapped_walk(&pvmw))
   1301          if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
   1305          entry = ptep_get(pvmw.pte);
   1309          flush_cache_page(vma, pvmw.address, page_to_pfn(page));
   1324          entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
    [all...]
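The range init at 1295 is half of the usual mmu-notifier bracket around a pte
rewrite; the start/end calls are elided from the hits. A sketch of the full
pattern, reconstructed on that assumption:

    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
                            pvmw.address + PAGE_SIZE);
    mmu_notifier_invalidate_range_start(&range);
    /* walk, then clear/flush and rewrite the pte under pvmw.ptl */
    mmu_notifier_invalidate_range_end(&range);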
internal.h
    833  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
    835          struct vm_area_struct *vma = pvmw->vma;
    840          if (pvmw->nr_pages == 1)
    841                  return pvmw->address + PAGE_SIZE;
    843          pgoff = pvmw->pgoff + pvmw->nr_pages;
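The hits stop mid-function; a hedged reconstruction of the rest of
vma_address_end(), whose point is that a large folio can extend past the vma:

    pgoff = pvmw->pgoff + pvmw->nr_pages;
    address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    /* the folio may spill past this vma, but the walk must not */
    if (address < vma->vm_start || address > vma->vm_end)
            address = vma->vm_end;
    return address;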
huge_memory.c
   3673  int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
   3677          struct vm_area_struct *vma = pvmw->vma;
   3679          unsigned long address = pvmw->address;
   3685          if (!(pvmw->pmd && !pvmw->pte))
   3689          pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
   3694          set_pmd_at(mm, address, pvmw->pmd, pmdval);
   3715          set_pmd_at(mm, address, pvmw->pmd, pmdswp);
   3723  void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
   3726          struct vm_area_struct *vma = pvmw->vma;
    [all...]
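Between 3689 and 3715 set_pmd_migration_entry() turns the invalidated pmd into a
swap-format migration entry; a hedged sketch of that middle step (dirty,
anon-exclusive and soft-dirty handling elided):

    pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
    if (pmd_write(pmdval))
            entry = make_writable_migration_entry(page_to_pfn(page));
    else
            entry = make_readable_migration_entry(page_to_pfn(page));
    pmdswp = swp_entry_to_pmd(entry);
    set_pmd_at(mm, address, pvmw->pmd, pmdswp);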
vmscan.c
   3988  void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
   3995          pte_t *pte = pvmw->pte;
   3996          unsigned long addr = pvmw->address;
   3997          struct vm_area_struct *vma = pvmw->vma;
   3998          struct folio *folio = pfn_folio(pvmw->pfn);
   4007          lockdep_assert_held(pvmw->ptl);
   4010          if (spin_is_contended(pvmw->ptl))
   4087          update_bloom_filter(mm_state, max_seq, pvmw->pmd);
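lru_gen_look_around() piggybacks on the pte lock its caller already holds, scanning
the ptes around pvmw->address to age them in one batch; hence the guard at 4010,
annotated here:

    lockdep_assert_held(pvmw->ptl);        /* caller took the lock, not us */
    if (spin_is_contended(pvmw->ptl))
            return;                        /* others wait: skip the batch scan */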
/linux-master/mm/damon/
paddr.c
     22          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
     24          while (page_vma_mapped_walk(&pvmw)) {
     25                  addr = pvmw.address;
     26                  if (pvmw.pte)
     27                          damon_ptep_mkold(pvmw.pte, vma, addr);
     29                          damon_pmdp_mkold(pvmw.pmd, vma, addr);
     86          DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
     89          while (page_vma_mapped_walk(&pvmw)) {
     90                  addr = pvmw.address;
     91                  if (pvmw.pte) {
    [all...]
/linux-master/include/linux/
rmap.h
    672  static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
    675          if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
    676                  pte_unmap(pvmw->pte);
    677          if (pvmw->ptl)
    678                  spin_unlock(pvmw->ptl);
    681  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
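For reference, the DEFINE_FOLIO_VMA_WALK() initializer the callers above use expands
to roughly the following (field list hedged against the same header; pte, pmd and
ptl start out NULL and are filled in by page_vma_mapped_walk()):

    struct page_vma_mapped_walk pvmw = {
            .pfn      = folio_pfn(folio),      /* what to look for */
            .nr_pages = folio_nr_pages(folio),
            .pgoff    = folio_pgoff(folio),
            .vma      = vma,                   /* where to look */
            .address  = address,
            .flags    = flags,                 /* PVMW_SYNC, PVMW_MIGRATION */
    };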
swapops.h
    531  extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
    534  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
    564  static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
    570  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
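The duplicated signatures are the usual config split: 531/534 are the real
declarations when THP migration is supported (CONFIG_ARCH_ENABLE_THP_MIGRATION),
564/570 the fallback stubs. A hedged sketch of the stub form:

    static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                                              struct page *page)
    {
            BUILD_BUG();    /* must be unreachable when compiled out */
            return 0;
    }

The mmzone.h pair below (557/576) is the same pattern, keyed on CONFIG_LRU_GEN.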
mmzone.h
    557  void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
    576  static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
/linux-master/kernel/events/
uprobes.c
    159          DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
    178          if (!page_vma_mapped_walk(&pvmw))
    180          VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
    195          flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
    196          ptep_clear_flush(vma, addr, pvmw.pte);
    198          set_pte_at_notify(mm, addr, pvmw.pte,
    204          page_vma_mapped_walk_done(&pvmw);
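Lines 195-204 trace the canonical pte-replacement sequence; a sketch with the
argument elided at 198 filled in, assuming the surrounding function is uprobes'
__replace_page() and new_page is the replacement page:

    flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); /* VIVT caches   */
    ptep_clear_flush(vma, addr, pvmw.pte);                    /* pte + TLB     */
    set_pte_at_notify(mm, addr, pvmw.pte,
                      mk_pte(new_page, vma->vm_page_prot));   /* notify + set  */
    page_vma_mapped_walk_done(&pvmw);                         /* drop pvmw.ptl */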