/linux-master/mm/

swapfile.c
    1935  struct vm_fault vmf = { local
    1943  &vmf);

swap_state.c
    749  static void swap_ra_info(struct vm_fault *vmf, argument
    752  struct vm_area_struct *vma = vmf->vma;
    765  faddr = vmf->address;
    805  * @vmf: fault information
    812  * Caller must hold read mmap_lock if vmf->vma is not NULL.
    816  struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
    831  swap_ra_info(vmf, &ra_info);
    835  addr = vmf->address - (ra_info.offset * PAGE_SIZE);
    841  pte = pte_offset_map(vmf->pmd, addr);
    886  * @vmf
    815  swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) argument
    894  swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) argument
    [all...]

shmem.c
    1957  * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
    1961  struct vm_fault *vmf, vm_fault_t *fault_type)
    1963  struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
    1986  *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
    2035  *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
    2195  static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode) argument
    2205  vmf->pgoff >= shmem_falloc->start &&
    2206  vmf->pgoff < shmem_falloc->next) {
    2211  fpin = maybe_unlock_mmap_for_io(vmf, NUL
    1959  shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type) argument
    2236  shmem_fault(struct vm_fault *vmf) argument
    [all...]

nommu.c
    1618  vm_fault_t filemap_fault(struct vm_fault *vmf) argument
    1625  vm_fault_t filemap_map_pages(struct vm_fault *vmf, argument

mmap.c
    3520  static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
    3573  static vm_fault_t special_mapping_fault(struct vm_fault *vmf) argument
    3575  struct vm_area_struct *vma = vmf->vma;
    3585  return sm->fault(sm, vmf->vma, vmf);
    3590  for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
    3596  vmf->page = page;

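The special_mapping_fault() lines above show the simplest .fault idiom in this listing: walk a NULL-terminated array of preallocated pages to resolve vmf->pgoff, then hand the match back through vmf->page. A minimal sketch of that idiom, assuming a hypothetical driver whose demo_pages[] array was filled at init time:

    #include <linux/mm.h>

    /* Hypothetical NULL-terminated array, populated at init time. */
    static struct page *demo_pages[16];

    static vm_fault_t demo_fault(struct vm_fault *vmf)
    {
            struct page **pages = demo_pages;
            pgoff_t pgoff;

            /* Consume vmf->pgoff entries, stopping at the terminator. */
            for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
                    pgoff--;

            if (!*pages)
                    return VM_FAULT_SIGBUS; /* offset beyond the array */

            /* Return the page with a reference held; the fault core
             * inserts it into the page tables. */
            get_page(*pages);
            vmf->page = *pages;
            return 0;
    }
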
mempolicy.c
    2722  * @vmf: structure describing the fault
    2732  int mpol_misplaced(struct folio *folio, struct vm_fault *vmf, argument
    2739  struct vm_area_struct *vma = vmf->vma;
    2749  lockdep_assert_held(vmf->ptl);

memory.c
    107  static vm_fault_t do_fault(struct vm_fault *vmf);
    108  static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
    109  static bool vmf_pte_changed(struct vm_fault *vmf);
    115  static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) argument
    117  if (!userfaultfd_wp(vmf->vma))
    119  if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
    122  return pte_marker_uffd_wp(vmf->orig_pte);
    2947  static inline int pte_unmap_same(struct vm_fault *vmf) argument
    2952  spin_lock(vmf->ptl);
    2953  same = pte_same(ptep_get(vmf
    2968  __wp_page_copy_user(struct page *dst, struct page *src, struct vm_fault *vmf) argument
    3088  do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) argument
    3121  fault_dirty_shared_page(struct vm_fault *vmf) argument
    3207  vmf_can_call_fault(const struct vm_fault *vmf) argument
    3232  vmf_anon_prepare(struct vm_fault *vmf) argument
    3269  wp_page_copy(struct vm_fault *vmf) argument
    3437  finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) argument
    3461  wp_pfn_shared(struct vm_fault *vmf) argument
    3817  remove_device_exclusive_entry(struct vm_fault *vmf) argument
    3878  pte_marker_clear(struct vm_fault *vmf) argument
    3898  do_pte_missing(struct vm_fault *vmf) argument
    3910  pte_marker_handle_uffd_wp(struct vm_fault *vmf) argument
    3922  handle_pte_marker(struct vm_fault *vmf) argument
    3953  do_swap_page(struct vm_fault *vmf) argument
    4329  alloc_anon_folio(struct vm_fault *vmf) argument
    4411  do_anonymous_page(struct vm_fault *vmf) argument
    4535  __do_fault(struct vm_fault *vmf) argument
    4592  deposit_prealloc_pte(struct vm_fault *vmf) argument
    4605  do_set_pmd(struct vm_fault *vmf, struct page *page) argument
    4670  do_set_pmd(struct vm_fault *vmf, struct page *page) argument
    4684  set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr) argument
    4718  vmf_pte_changed(struct vm_fault *vmf) argument
    4741  finish_fault(struct vm_fault *vmf) argument
    4860  do_fault_around(struct vm_fault *vmf) argument
    4893  should_fault_around(struct vm_fault *vmf) argument
    4906  do_read_fault(struct vm_fault *vmf) argument
    4938  do_cow_fault(struct vm_fault *vmf) argument
    4976  do_shared_fault(struct vm_fault *vmf) argument
    5026  do_fault(struct vm_fault *vmf) argument
    5070  numa_migrate_prep(struct folio *folio, struct vm_fault *vmf, unsigned long addr, int page_nid, int *flags) argument
    5089  numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long fault_addr, pte_t *fault_pte, bool writable) argument
    5104  numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, struct folio *folio, pte_t fault_pte, bool ignore_writable, bool pte_write_upgrade) argument
    5137  do_numa_page(struct vm_fault *vmf) argument
    5251  create_huge_pmd(struct vm_fault *vmf) argument
    5262  wp_huge_pmd(struct vm_fault *vmf) argument
    5293  create_huge_pud(struct vm_fault *vmf) argument
    5307  wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) argument
    5346  handle_pte_fault(struct vm_fault *vmf) argument
    5433  struct vm_fault vmf = { local
    [all...]

khugepaged.c
    986  struct vm_fault vmf = { local
    1003  vmf.orig_pte = ptep_get_lockless(pte);
    1004  if (!is_swap_pte(vmf.orig_pte))
    1007  vmf.pte = pte;
    1008  vmf.ptl = ptl;
    1009  ret = do_swap_page(&vmf);
    1442  struct vm_fault vmf = { local
    1452  if (do_set_pmd(&vmf, hpage))

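khugepaged is the odd one out here: it synthesizes a struct vm_fault on the stack instead of receiving one from the page-fault path, then feeds it to do_swap_page() to pull a swapped-out PTE back in. A sketch of that construction, assuming the caller did a page-table walk like khugepaged's; field values are illustrative, not the exact khugepaged code:

    /* Sketch: hand-built vm_fault for one swapped-out PTE. */
    static vm_fault_t demo_swapin_one(struct vm_area_struct *vma,
                                      unsigned long address,
                                      pmd_t *pmd, pte_t *pte, spinlock_t *ptl)
    {
            struct vm_fault vmf = {
                    .vma     = vma,
                    .address = address,
                    .pgoff   = linear_page_index(vma, address),
                    .flags   = FAULT_FLAG_ALLOW_RETRY,
                    .pmd     = pmd,
            };

            vmf.orig_pte = ptep_get_lockless(pte);  /* unlocked snapshot */
            if (!is_swap_pte(vmf.orig_pte))
                    return 0;                       /* nothing to swap in */

            vmf.pte = pte;                          /* mapped PTE + lock */
            vmf.ptl = ptl;
            return do_swap_page(&vmf);              /* may drop mmap_lock */
    }
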
internal.h
    296  vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
    297  vm_fault_t do_swap_page(struct vm_fault *vmf);
    941  static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
    944  int flags = vmf->flags;
    956  fpin = get_file(vmf->vma->vm_file);
    957  release_fault_lock(vmf);
    1177  int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,

hugetlb.c
    5273  static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) argument
    5921  struct vm_fault *vmf)
    5923  struct vm_area_struct *vma = vmf->vma;
    5925  const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
    5926  pte_t pte = huge_ptep_get(vmf->pte);
    5954  set_huge_ptep_writable(vma, vmf->address, vmf->pte);
    5980  set_huge_ptep_writable(vma, vmf->address, vmf->pte);
    6007  spin_unlock(vmf
    5920  hugetlb_wp(struct folio *pagecache_folio, struct vm_fault *vmf) argument
    6165  hugetlb_handle_userfault(struct vm_fault *vmf, struct address_space *mapping, unsigned long reason) argument
    6199  hugetlb_no_page(struct address_space *mapping, struct vm_fault *vmf) argument
    6444  struct vm_fault vmf = { local
    [all...]

huge_memory.c
    918  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, argument
    921  struct vm_area_struct *vma = vmf->vma;
    924  unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
    945  clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
    953  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    954  if (unlikely(!pmd_none(*vmf->pmd))) {
    965  spin_unlock(vmf->ptl);
    968  ret = handle_userfault(vmf, VM_UFFD_MISSING);
    977  pgtable_trans_huge_deposit(vma->vm_mm, vmf
    1049  do_huge_pmd_anonymous_page(struct vm_fault *vmf) argument
    1171  vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) argument
    1260  vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) argument
    1499  huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) argument
    1513  huge_pmd_set_accessed(struct vm_fault *vmf) argument
    1527  do_huge_pmd_wp_page(struct vm_fault *vmf) argument
    1645  do_huge_pmd_numa_page(struct vm_fault *vmf) argument
    [all...]

filemap.c
    1694  vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) argument
    1696  unsigned int flags = vmf->flags;
    1706  release_fault_lock(vmf);
    1718  release_fault_lock(vmf);
    3064  * @vmf - the vm_fault for this fault.
    3074  static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, argument
    3085  if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
    3088  *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
    3089  if (vmf->flags & FAULT_FLAG_KILLABLE) {
    3099  release_fault_lock(vmf);
    3115  do_sync_mmap_readahead(struct vm_fault *vmf) argument
    3184  do_async_mmap_readahead(struct vm_fault *vmf, struct folio *folio) argument
    3208  filemap_fault_recheck_pte_none(struct vm_fault *vmf) argument
    3273  filemap_fault(struct vm_fault *vmf) argument
    3432  filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, pgoff_t start) argument
    3505  filemap_map_folio_range(struct vm_fault *vmf, struct folio *folio, unsigned long start, unsigned long addr, unsigned int nr_pages, unsigned long *rss, unsigned int *mmap_miss) argument
    3568  filemap_map_order0_folio(struct vm_fault *vmf, struct folio *folio, unsigned long addr, unsigned long *rss, unsigned int *mmap_miss) argument
    3600  filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) argument
    3668  filemap_page_mkwrite(struct vm_fault *vmf) argument
    3723  filemap_page_mkwrite(struct vm_fault *vmf) argument
    [all...]

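filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() are the generic page-cache handlers that several of the filesystems further down this listing wrap or reuse directly. A sketch of the usual wiring at mmap() time, with hypothetical demo_* names (this mirrors generic_file_mmap(), not any one filesystem):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static const struct vm_operations_struct demo_file_vm_ops = {
            .fault          = filemap_fault,        /* read faults */
            .map_pages      = filemap_map_pages,    /* fault-around */
            .page_mkwrite   = filemap_page_mkwrite, /* shared write faults */
    };

    static int demo_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            file_accessed(file);
            vma->vm_ops = &demo_file_vm_ops;
            return 0;
    }

f2fs_filemap_fault() and bch2_page_fault() below follow the same shape, adding filesystem-specific locking and tracing around the filemap_fault() call.
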
/linux-master/fs/xfs/

xfs_file.c
    1253  struct vm_fault *vmf,
    1258  return dax_iomap_fault(vmf, order, pfn, NULL,
    1259  (write_fault && !vmf->cow_page) ?
    1266  struct vm_fault *vmf,
    1288  struct vm_fault *vmf,
    1292  struct inode *inode = file_inode(vmf->vma->vm_file);
    1301  file_update_time(vmf->vma->vm_file);
    1310  ret = xfs_dax_fault(vmf, order, write_fault, &pfn);
    1312  ret = dax_finish_sync_fault(vmf, order, pfn);
    1314  ret = iomap_page_mkwrite(vmf,
    1252  xfs_dax_fault( struct vm_fault *vmf, unsigned int order, bool write_fault, pfn_t *pfn) argument
    1265  xfs_dax_fault( struct vm_fault *vmf, unsigned int order, bool write_fault, pfn_t *pfn) argument
    1287  __xfs_filemap_fault( struct vm_fault *vmf, unsigned int order, bool write_fault) argument
    1328  xfs_is_write_fault( struct vm_fault *vmf) argument
    1336  xfs_filemap_fault( struct vm_fault *vmf) argument
    1346  xfs_filemap_huge_fault( struct vm_fault *vmf, unsigned int order) argument
    1359  xfs_filemap_page_mkwrite( struct vm_fault *vmf) argument
    1371  xfs_filemap_pfn_mkwrite( struct vm_fault *vmf) argument
    [all...]

/linux-master/fs/udf/

file.c
    37  static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf) argument
    39  struct vm_area_struct *vma = vmf->vma;
    42  struct folio *folio = page_folio(vmf->page);

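udf_page_mkwrite() above follows the common page_mkwrite shape: take freeze protection, lock the folio, revalidate it against the mapping and file size, dirty it, and return with it still locked. A condensed sketch of that shape under those assumptions; it is not the udf code itself:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static vm_fault_t demo_page_mkwrite(struct vm_fault *vmf)
    {
            struct folio *folio = page_folio(vmf->page);
            struct inode *inode = file_inode(vmf->vma->vm_file);

            sb_start_pagefault(inode->i_sb);        /* freeze protection */
            file_update_time(vmf->vma->vm_file);

            folio_lock(folio);
            if (folio->mapping != inode->i_mapping ||
                folio_pos(folio) >= i_size_read(inode)) {
                    folio_unlock(folio);            /* truncated under us */
                    sb_end_pagefault(inode->i_sb);
                    return VM_FAULT_NOPAGE;
            }

            folio_mark_dirty(folio);
            sb_end_pagefault(inode->i_sb);
            return VM_FAULT_LOCKED;                 /* folio stays locked */
    }
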
/linux-master/fs/proc/

vmcore.c
    424  static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf) argument
    427  struct address_space *mapping = vmf->vma->vm_file->f_mapping;
    428  pgoff_t index = vmf->pgoff;
    453  vmf->page = page;

/linux-master/fs/f2fs/

file.c
    39  static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) argument
    41  struct inode *inode = file_inode(vmf->vma->vm_file);
    42  vm_flags_t flags = vmf->vma->vm_flags;
    45  ret = filemap_fault(vmf);
    50  trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret);
    55  static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) argument
    57  struct page *page = vmf->page;
    58  struct inode *inode = file_inode(vmf->vma->vm_file);
    107  file_update_time(vmf->vma->vm_file);
    166  trace_f2fs_vm_page_mkwrite(inode, page->index, vmf
    [all...]

/linux-master/fs/ext2/

file.c
    93  static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) argument
    95  struct inode *inode = file_inode(vmf->vma->vm_file);
    97  bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
    98  (vmf->vma->vm_flags & VM_SHARED);
    102  file_update_time(vmf->vma->vm_file);
    106  ret = dax_iomap_fault(vmf, 0, NULL, NULL, &ext2_iomap_ops);

/linux-master/fs/bcachefs/

fs-io-pagecache.c
    541  vm_fault_t bch2_page_fault(struct vm_fault *vmf) argument
    543  struct file *file = vmf->vma->vm_file;
    573  ret = filemap_fault(vmf);
    579  vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf) argument
    581  struct folio *folio = page_folio(vmf->page);
    582  struct file *file = vmf->vma->vm_file;

/linux-master/drivers/vfio/pci/

vfio_pci_core.c
    1769  static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) argument
    1771  struct vm_area_struct *vma = vmf->vma;

/linux-master/virt/kvm/

kvm_main.c
    4098  static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) argument
    4100  struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
    4103  if (vmf->pgoff == 0)
    4106  else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
    4110  else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
    4113  else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
    4116  vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
    4118  return kvm_arch_vcpu_fault(vcpu, vmf);
    4120  vmf->page = page;

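kvm_vcpu_fault() above dispatches on vmf->pgoff so that a single VMA can expose several distinct kernel pages at fixed offsets. A reduced sketch of the pattern; struct demo_ctx, DEMO_STATS_OFFSET, and the lowmem allocations it assumes are hypothetical:

    #include <linux/mm.h>

    #define DEMO_STATS_OFFSET 1      /* hypothetical page offset */

    struct demo_ctx {
            void *run;               /* page-sized lowmem allocations, */
            void *stats;             /* so virt_to_page() is valid     */
    };

    static vm_fault_t demo_ctx_fault(struct vm_fault *vmf)
    {
            struct demo_ctx *ctx = vmf->vma->vm_file->private_data;
            struct page *page;

            if (vmf->pgoff == 0)
                    page = virt_to_page(ctx->run);
            else if (vmf->pgoff == DEMO_STATS_OFFSET)
                    page = virt_to_page(ctx->stats);
            else
                    return VM_FAULT_SIGBUS;  /* no page at this offset */

            get_page(page);
            vmf->page = page;
            return 0;
    }
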
/linux-master/lib/

test_hmm.c
    1422  static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) argument
    1436  rpage = vmf->page->zone_device_data;
    1440  args.vma = vmf->vma;
    1441  args.start = vmf->address;
    1447  args.fault_page = vmf->page;

/linux-master/kernel/events/

core.c
    6174  static vm_fault_t perf_mmap_fault(struct vm_fault *vmf) argument
    6176  struct perf_event *event = vmf->vma->vm_file->private_data;
    6180  if (vmf->flags & FAULT_FLAG_MKWRITE) {
    6181  if (vmf->pgoff == 0)
    6191  if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
    6194  vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
    6195  if (!vmf->page)
    6198  get_page(vmf
    [all...]

/linux-master/kernel/bpf/

arena.c
    245  static vm_fault_t arena_vm_fault(struct vm_fault *vmf) argument
    247  struct bpf_map *map = vmf->vma->vm_file->private_data;
    254  kaddr = kbase + (u32)(vmf->address);
    266  ret = mtree_insert(&arena->mt, vmf->pgoff, MT_ENTRY, GFP_KERNEL);
    273  mtree_erase(&arena->mt, vmf->pgoff);
    279  mtree_erase(&arena->mt, vmf->pgoff);
    285  vmf->page = page;

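arena_vm_fault() above uses a maple tree to remember which page offsets of the mapping have been populated, erasing the entry again when a later step fails. A small sketch of that bookkeeping with the mtree_* API; demo_mt and the demo_* helpers are hypothetical:

    #include <linux/maple_tree.h>
    #include <linux/mm.h>

    static DEFINE_MTREE(demo_mt);

    /* Record that @page now backs @pgoff. Returns -EEXIST if another
     * fault already claimed the slot, -ENOMEM on node allocation
     * failure. */
    static int demo_track_page(pgoff_t pgoff, struct page *page)
    {
            return mtree_insert(&demo_mt, pgoff, page, GFP_KERNEL);
    }

    /* Roll back the reservation, e.g. when page allocation fails. */
    static void demo_untrack_page(pgoff_t pgoff)
    {
            mtree_erase(&demo_mt, pgoff);
    }
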
/linux-master/include/trace/events/

fs_dax.h
    11  TP_PROTO(struct inode *inode, struct vm_fault *vmf,
    13  TP_ARGS(inode, vmf, max_pgoff, result),
    29  __entry->vm_start = vmf->vma->vm_start;
    30  __entry->vm_end = vmf->vma->vm_end;
    31  __entry->vm_flags = vmf->vma->vm_flags;
    32  __entry->address = vmf->address;
    33  __entry->flags = vmf->flags;
    34  __entry->pgoff = vmf->pgoff;
    56  TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
    58  TP_ARGS(inode, vmf, max_pgof
    [all...]

/linux-master/include/linux/

pagemap.h
    993  vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
    1098  struct vm_fault *vmf)
    1102  return __folio_lock_or_retry(folio, vmf);
    1097  folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) argument

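folio_lock_or_retry() either takes the folio lock or, via __folio_lock_or_retry() in filemap.c above, releases the fault lock and reports VM_FAULT_RETRY. A sketch of how a fault path consumes it, as do_swap_page() does; the demo_* name is hypothetical:

    #include <linux/pagemap.h>

    static vm_fault_t demo_lock_step(struct folio *folio, struct vm_fault *vmf)
    {
            vm_fault_t ret = folio_lock_or_retry(folio, vmf);

            if (ret) {                      /* VM_FAULT_RETRY: lock not
                                             * taken, fault lock dropped */
                    folio_put(folio);       /* drop our reference, bail */
                    return ret;
            }

            /* ... operate on the locked folio ... */
            folio_unlock(folio);
            return 0;
    }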