Searched refs:vmf (Results 26 - 50 of 170) sorted by relevance

/linux-master/arch/csky/abiv1/
cacheflush.c 44 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, argument
/linux-master/arch/x86/kernel/cpu/sgx/
virt.c 74 static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf) argument
76 struct vm_area_struct *vma = vmf->vma;
81 ret = __sgx_vepc_fault(vepc, vma, vmf->address);
87 if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
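The sgx_vepc_fault() hit above is a compact example of the fault-retry handshake: on a transient failure the handler may return VM_FAULT_RETRY, but only when the core set FAULT_FLAG_ALLOW_RETRY, and it must drop the mmap lock itself before doing so. A minimal sketch of that shape (my_try_fault() is an assumed stand-in for __sgx_vepc_fault(), not a kernel API):

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        int ret;

        ret = my_try_fault(vma, vmf->address);  /* assumed helper */
        if (!ret)
            return VM_FAULT_NOPAGE;             /* pte installed, done */

        /* Busy: back off with RETRY only if the core permits it, and
         * drop the mmap lock ourselves as the RETRY contract requires. */
        if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
            mmap_read_unlock(vma->vm_mm);
            return VM_FAULT_RETRY;
        }
        return VM_FAULT_SIGBUS;
    }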
/linux-master/drivers/char/agp/
alpha-agp.c 14 static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf) argument
21 dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
32 vmf->page = page;
/linux-master/include/drm/ttm/
ttm_bo.h 391 struct vm_fault *vmf);
392 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
395 vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
400 vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
/linux-master/arch/s390/kernel/
vdso.c 70 struct vm_area_struct *vma, struct vm_fault *vmf)
76 switch (vmf->pgoff) {
84 addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
108 return vmf_insert_pfn(vma, vmf->address, pfn);
69 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
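The s390 hit above, and the powerpc, riscv, and arm64 vdso.c hits further down this page, all share the same vvar_fault() shape: switch on vmf->pgoff to identify which page of the special mapping faulted, then install its pfn with vmf_insert_pfn(). A condensed sketch, with vvar_page as an assumed stand-in for each arch's data page:

    static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                                 struct vm_area_struct *vma, struct vm_fault *vmf)
    {
        unsigned long pfn;

        switch (vmf->pgoff) {
        case 0:     /* the vvar data page */
            pfn = page_to_pfn(virt_to_page(vvar_page));  /* assumed symbol */
            break;
        default:    /* no other page belongs in this mapping */
            return VM_FAULT_SIGBUS;
        }
        return vmf_insert_pfn(vma, vmf->address, pfn);
    }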
/linux-master/drivers/gpu/drm/gma500/
gem.c 109 static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
240 * @vmf: fault detail
254 static vm_fault_t psb_gem_fault(struct vm_fault *vmf) argument
256 struct vm_area_struct *vma = vmf->vma;
289 because vmf->pgoff is the fake GEM offset */
290 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
297 ret = vmf_insert_pfn(vma, vmf->address, pfn);
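The comment at line 289 explains why psb_gem_fault() (and armada_gem_vm_fault() below) recompute the page index: GEM objects are mapped through a fake offset in the DRM mmap space, so vmf->pgoff is not the offset into the object. The recurring idiom, with base_pfn as an assumed first pfn of the object's backing store:

    /* vmf->pgoff carries the fake GEM mmap offset, so derive the index
     * into the object from the faulting user address instead. */
    pgoff_t page_offset = (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;

    return vmf_insert_pfn(vmf->vma, vmf->address, base_pfn + page_offset);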
fbdev.c 23 static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf) argument
25 struct vm_area_struct *vma = vmf->vma;
27 unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
/linux-master/fs/
userfaultfd.c 246 struct vm_fault *vmf,
249 struct vm_area_struct *vma = vmf->vma;
253 assert_fault_locked(vmf);
255 ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));
276 struct vm_fault *vmf,
291 struct vm_fault *vmf,
295 unsigned long address = vmf->address;
304 assert_fault_locked(vmf);
378 vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) argument
380 struct vm_area_struct *vma = vmf->vma;
245 userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, struct vm_fault *vmf, unsigned long reason) argument
275 userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, struct vm_fault *vmf, unsigned long reason) argument
290 userfaultfd_must_wait(struct userfaultfd_ctx *ctx, struct vm_fault *vmf, unsigned long reason) argument
[all...]
/linux-master/mm/
swap.h 59 struct vm_fault *vmf);
91 struct vm_fault *vmf)
90 swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, struct vm_fault *vmf) argument
swap_state.c 749 static void swap_ra_info(struct vm_fault *vmf, argument
752 struct vm_area_struct *vma = vmf->vma;
765 faddr = vmf->address;
805 * @vmf: fault information
812 * Caller must hold read mmap_lock if vmf->vma is not NULL.
816 struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
831 swap_ra_info(vmf, &ra_info);
835 addr = vmf->address - (ra_info.offset * PAGE_SIZE);
841 pte = pte_offset_map(vmf->pmd, addr);
886 * @vmf: fault information
815 swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) argument
894 swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) argument
[all...]
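swap_vma_readahead() anchors its window on the fault: it backs up ra_info.offset pages from vmf->address and scans the ptes from there. A stripped-down sketch of that walk (edge clamping omitted; the ra_info fields mirror the hit above):

    unsigned long addr = vmf->address - (ra_info.offset * PAGE_SIZE);
    pte_t *pte = pte_offset_map(vmf->pmd, addr);  /* may fail and return NULL */

    if (pte) {
        for (int i = 0; i < ra_info.nr_pte; i++)
            /* inspect ptep_get(pte + i) for swap entries to prefetch */;
        pte_unmap(pte);
    }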
/linux-master/arch/powerpc/kernel/
vdso.c 85 struct vm_area_struct *vma, struct vm_fault *vmf);
133 struct vm_area_struct *vma, struct vm_fault *vmf)
138 switch (vmf->pgoff) {
163 return vmf_insert_pfn(vma, vmf->address, pfn);
132 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
/linux-master/arch/csky/abiv2/
cacheflush.c 10 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, argument
/linux-master/arch/riscv/kernel/
vdso.c 125 struct vm_area_struct *vma, struct vm_fault *vmf)
130 switch (vmf->pgoff) {
155 return vmf_insert_pfn(vma, vmf->address, pfn);
124 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
/linux-master/fs/afs/
write.c 256 vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) argument
258 struct file *file = vmf->vma->vm_file;
262 return netfs_page_mkwrite(vmf, NULL);
/linux-master/fs/udf/
file.c 37 static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf) argument
39 struct vm_area_struct *vma = vmf->vma;
42 struct folio *folio = page_folio(vmf->page);
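afs_page_mkwrite() and udf_page_mkwrite() above are both instances of the ->page_mkwrite hook, called when a read-only page of a shared mapping takes its first write. A generic sketch of the common shape, assuming a filesystem that needs no block reservation (real implementations also check i_size and may fail on allocation):

    static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
    {
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct folio *folio = page_folio(vmf->page);

        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);
        folio_lock(folio);
        /* The folio may have been truncated while we took the fault. */
        if (folio->mapping != inode->i_mapping) {
            folio_unlock(folio);
            sb_end_pagefault(inode->i_sb);
            return VM_FAULT_NOPAGE;
        }
        folio_mark_dirty(folio);
        folio_wait_stable(folio);
        sb_end_pagefault(inode->i_sb);
        return VM_FAULT_LOCKED;  /* folio stays locked for the caller */
    }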
/linux-master/drivers/dma-buf/
udmabuf.c 33 static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) argument
35 struct vm_area_struct *vma = vmf->vma;
37 pgoff_t pgoff = vmf->pgoff;
41 vmf->page = ubuf->pages[pgoff];
42 get_page(vmf->page);
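udmabuf_vm_fault() shows the simplest page-backed style of ->fault: point vmf->page at a referenced struct page and return 0, letting the core install the pte. A minimal sketch with an assumed my_buf type (pages and pagecount are illustrative field names, not the udmabuf ones):

    static vm_fault_t my_vm_fault(struct vm_fault *vmf)
    {
        struct my_buf *buf = vmf->vma->vm_private_data;  /* assumed type */

        if (vmf->pgoff >= buf->pagecount)
            return VM_FAULT_SIGBUS;
        vmf->page = buf->pages[vmf->pgoff];
        get_page(vmf->page);  /* the core drops this ref once mapped */
        return 0;
    }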
/linux-master/drivers/dma-buf/heaps/
cma_heap.c 163 static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) argument
165 struct vm_area_struct *vma = vmf->vma;
168 if (vmf->pgoff > buffer->pagecount)
171 return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
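One detail worth flagging in the cma_heap hit: the guard vmf->pgoff > buffer->pagecount still admits pgoff == pagecount, one page past the end, since valid offsets run from 0 to pagecount - 1. The inclusive-safe form of the same handler body would be:

    if (vmf->pgoff >= buffer->pagecount)  /* >=, not >, for a 0-based index */
        return VM_FAULT_SIGBUS;
    return vmf_insert_pfn(vma, vmf->address,
                          page_to_pfn(buffer->pages[vmf->pgoff]));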
/linux-master/kernel/bpf/
arena.c 245 static vm_fault_t arena_vm_fault(struct vm_fault *vmf) argument
247 struct bpf_map *map = vmf->vma->vm_file->private_data;
254 kaddr = kbase + (u32)(vmf->address);
266 ret = mtree_insert(&arena->mt, vmf->pgoff, MT_ENTRY, GFP_KERNEL);
273 mtree_erase(&arena->mt, vmf->pgoff);
279 mtree_erase(&arena->mt, vmf->pgoff);
285 vmf->page = page;
/linux-master/fs/fuse/
dax.c 787 static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order, argument
791 struct inode *inode = file_inode(vmf->vma->vm_file);
812 ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
821 ret = dax_finish_sync_fault(vmf, order, pfn);
830 static vm_fault_t fuse_dax_fault(struct vm_fault *vmf) argument
832 return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE);
835 static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order) argument
837 return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE);
840 fuse_dax_page_mkwrite(struct vm_fault *vmf) argument
845 fuse_dax_pfn_mkwrite(struct vm_fault *vmf) argument
[all...]
/linux-master/fs/ext4/
file.c 702 static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order) argument
708 struct inode *inode = file_inode(vmf->vma->vm_file);
717 * We check for VM_SHARED rather than vmf->cow_page since the latter is
722 bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
723 (vmf->vma->vm_flags & VM_SHARED);
724 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
729 file_update_time(vmf->vma->vm_file);
742 result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
751 result = dax_finish_sync_fault(vmf, order, pfn);
761 static vm_fault_t ext4_dax_fault(struct vm_fault *vmf) argument
[all...]
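The fuse and ext4 hits above share the DAX fault skeleton: hand the work to dax_iomap_fault() and, when it reports VM_FAULT_NEEDDSYNC on a MAP_SYNC mapping, complete the fault with dax_finish_sync_fault(). A sketch assuming a hypothetical my_iomap_ops (the filesystem locking around the call is omitted):

    static vm_fault_t my_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
    {
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                     (vmf->vma->vm_flags & VM_SHARED);
        vm_fault_t ret;
        pfn_t pfn;
        int error;

        if (write)
            file_update_time(vmf->vma->vm_file);
        ret = dax_iomap_fault(vmf, order, &pfn, &error, &my_iomap_ops);
        /* On a sync fault the metadata must be flushed before the pte
         * is made writable. */
        if (ret & VM_FAULT_NEEDDSYNC)
            ret = dax_finish_sync_fault(vmf, order, pfn);
        return ret;
    }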
/linux-master/drivers/gpu/drm/armada/
armada_gem.c 20 static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf) argument
22 struct drm_gem_object *gobj = vmf->vma->vm_private_data;
26 pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
27 return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
/linux-master/arch/powerpc/platforms/cell/spufs/
file.c 230 spufs_mem_mmap_fault(struct vm_fault *vmf) argument
232 struct vm_area_struct *vma = vmf->vma;
237 offset = vmf->pgoff << PAGE_SHIFT;
242 vmf->address, offset);
254 ret = vmf_insert_pfn(vma, vmf->address, pfn);
310 static vm_fault_t spufs_ps_fault(struct vm_fault *vmf, argument
314 struct spu_context *ctx = vmf->vma->vm_file->private_data;
315 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
353 ret = vmf_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT);
367 spufs_cntl_mmap_fault(struct vm_fault *vmf) argument
1023 spufs_signal1_mmap_fault(struct vm_fault *vmf) argument
1159 spufs_signal2_mmap_fault(struct vm_fault *vmf) argument
1288 spufs_mss_mmap_fault(struct vm_fault *vmf) argument
1350 spufs_psmap_mmap_fault(struct vm_fault *vmf) argument
1410 spufs_mfc_mmap_fault(struct vm_fault *vmf) argument
[all...]
/linux-master/arch/arm64/kernel/
vdso.c 152 struct vm_area_struct *vma, struct vm_fault *vmf)
157 switch (vmf->pgoff) {
182 return vmf_insert_pfn(vma, vmf->address, pfn);
151 vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) argument
/linux-master/drivers/gpu/drm/omapdrm/
omap_gem.c 354 struct vm_area_struct *vma, struct vm_fault *vmf)
360 /* We don't use vmf->pgoff since that has the fake offset: */
361 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
371 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
374 return vmf_insert_mixed(vma, vmf->address,
380 struct vm_area_struct *vma, struct vm_fault *vmf)
409 /* We don't use vmf->pgoff since that has the fake offset: */
410 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
421 vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
466 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
353 omap_gem_fault_1d(struct drm_gem_object *obj, struct vm_area_struct *vma, struct vm_fault *vmf) argument
379 omap_gem_fault_2d(struct drm_gem_object *obj, struct vm_area_struct *vma, struct vm_fault *vmf) argument
497 omap_gem_fault(struct vm_fault *vmf) argument
[all...]
/linux-master/arch/m68k/include/asm/
pgtable_mm.h 139 static inline void update_mmu_cache_range(struct vm_fault *vmf, argument
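update_mmu_cache_range() is the arch hook run after ptes are installed; the csky hits at the top of this page flush caches here, while architectures that need nothing reduce it to an empty inline. Sketch of the no-op form:

    static inline void update_mmu_cache_range(struct vm_fault *vmf,
            struct vm_area_struct *vma, unsigned long address,
            pte_t *ptep, unsigned int nr)
    {
        /* nothing to do: caches/TLB need no help after a pte update */
    }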

Completed in 224 milliseconds
