Searched refs:vmf (Results 76 - 100 of 168) sorted by relevance


/linux-master/arch/powerpc/kvm/
book3s_hv_uvmem.c
999 static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
1001 struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;
1003 if (kvmppc_svm_page_out(vmf->vma, vmf->address,
1004 vmf->address + PAGE_SIZE, PAGE_SHIFT,
1005 pvt->kvm, pvt->gpa, vmf->page))
book3s_64_vio.c
232 static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
234 struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
237 if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
240 page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
245 vmf->page = page;
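kvm_spapr_tce_fault above, and msc_mmap_fault, snd_usx2y_hwdep_pcm_vm_fault, uio_vma_fault and relay_buf_fault further down this page, all follow the same shape: bounds-check vmf->pgoff, look up the backing struct page, take a reference, and hand it to the core through vmf->page. A minimal sketch of that pattern; struct my_drv and my_drv_get_page() are hypothetical stand-ins for the driver's own bookkeeping:

#include <linux/mm.h>

struct my_drv {
	unsigned long nr_pages;
	struct page **pages;		/* backing pages, allocated at setup time */
};

/* Hypothetical lookup helper: return the page backing a given page offset. */
static struct page *my_drv_get_page(struct my_drv *drv, pgoff_t pgoff)
{
	return drv->pages[pgoff];
}

static vm_fault_t my_drv_vm_fault(struct vm_fault *vmf)
{
	struct my_drv *drv = vmf->vma->vm_private_data;	/* stashed in ->mmap() */
	struct page *page;

	if (vmf->pgoff >= drv->nr_pages)		/* fault beyond the buffer */
		return VM_FAULT_SIGBUS;

	page = my_drv_get_page(drv, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);			/* the core drops this reference when done */
	vmf->page = page;		/* the core maps it at vmf->address */
	return 0;
}

static const struct vm_operations_struct my_drv_vm_ops = {
	.fault = my_drv_vm_fault,
};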
/linux-master/fs/bcachefs/
fs-io-pagecache.c
541 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
543 struct file *file = vmf->vma->vm_file;
573 ret = filemap_fault(vmf);
579 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
581 struct folio *folio = page_folio(vmf->page);
582 struct file *file = vmf->vma->vm_file;
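The bcachefs pair shows the usual filesystem split: ->fault defers to filemap_fault() for the read side, while ->page_mkwrite runs when a writable shared mapping is about to dirty a page it already has. A stripped-down sketch of a page_mkwrite handler for the plain page-cache case (the real bch2_page_mkwrite additionally reserves space and handles errors):

#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);

	folio_lock(folio);
	if (folio->mapping != inode->i_mapping) {
		/* Raced with truncate/invalidate: make the core retry the fault. */
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}
	folio_wait_writeback(folio);	/* don't redirty a folio under writeback */
	folio_mark_dirty(folio);
	return VM_FAULT_LOCKED;		/* folio stays locked; the core unlocks it */
}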
/linux-master/fs/kernfs/
file.c
369 static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
371 struct file *file = vmf->vma->vm_file;
383 ret = of->vm_ops->fault(vmf);
389 static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
391 struct file *file = vmf->vma->vm_file;
403 ret = of->vm_ops->page_mkwrite(vmf);
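kernfs does not service the fault itself: it resolves the vm_operations_struct installed by the attribute that was actually mapped and forwards the same vmf to it (the real code also pins the underlying kernfs node around the call). The forwarding pattern, with struct my_open_file as a hypothetical per-open structure:

static vm_fault_t my_wrapper_fault(struct vm_fault *vmf)
{
	struct my_open_file {
		const struct vm_operations_struct *vm_ops;	/* ops of the wrapped mapping */
	} *of = vmf->vma->vm_file->private_data;

	if (!of->vm_ops || !of->vm_ops->fault)
		return VM_FAULT_SIGBUS;

	/* Pass the unmodified vmf straight through to the wrapped handler. */
	return of->vm_ops->fault(vmf);
}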
/linux-master/mm/
internal.h
199 vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
200 vm_fault_t do_swap_page(struct vm_fault *vmf);
851 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
854 int flags = vmf->flags;
866 fpin = get_file(vmf->vma->vm_file);
867 release_fault_lock(vmf);
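maybe_unlock_mmap_for_io() is the mm-internal helper behind the "drop the mmap lock while sleeping or doing I/O" pattern visible in the shmem hits below: if the fault flags allow a retry, it takes a reference on the mapped file (so the VMA's file cannot disappear), releases the fault lock, and returns the pinned file so the caller can finish with VM_FAULT_RETRY. A caller-side sketch, modelled loosely on how shmem_falloc_wait() uses it (the helper is only visible inside mm/):

static vm_fault_t my_fault_that_must_sleep(struct vm_fault *vmf)
{
	struct file *fpin;

	/* Pin the file and drop the fault lock if the flags allow a retry. */
	fpin = maybe_unlock_mmap_for_io(vmf, NULL);

	/* ... sleep, wait for an event, or start I/O without holding mmap_lock ... */

	if (fpin) {
		fput(fpin);			/* drop the reference taken above */
		return VM_FAULT_RETRY;		/* core re-runs the fault from scratch */
	}
	return VM_FAULT_NOPAGE;			/* lock was kept; hardware retries the access */
}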
shmem.c
1957 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
1961 struct vm_fault *vmf, vm_fault_t *fault_type)
1963 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1986 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2035 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2195 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2205 vmf->pgoff >= shmem_falloc->start &&
2206 vmf->pgoff < shmem_falloc->next) {
2211 fpin = maybe_unlock_mmap_for_io(vmf, NUL
1959 shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2236 shmem_fault(struct vm_fault *vmf)
[all...]
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_migrate.c
671 * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
780 * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
922 * @vmf: CPU vm fault vma, address
930 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
933 unsigned long addr = vmf->address;
940 svm_bo = vmf->page->zone_device_data;
951 if (mm != vmf->vma->vm_mm)
988 r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
989 KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
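book3s_hv_uvmem.c at the top of this page and kfd_migrate.c here implement the same hook: for ZONE_DEVICE device-private memory, a CPU touch is routed to the owning pagemap's migrate_to_ram() callback, which copies the data back into system RAM before the access can proceed, and the per-page driver state is recovered from vmf->page->zone_device_data. A skeletal sketch, with struct my_dev_page and my_copy_back_to_ram() as hypothetical stand-ins for the driver's own state and migration routine:

#include <linux/memremap.h>
#include <linux/mm.h>

struct my_dev_page;				/* state stored per device-private page */
int my_copy_back_to_ram(struct my_dev_page *p, struct vm_area_struct *vma,
			unsigned long addr, struct page *page);	/* hypothetical */

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	struct my_dev_page *p = vmf->page->zone_device_data;

	/* Copy the data back and remap it in the CPU page tables. */
	if (my_copy_back_to_ram(p, vmf->vma, vmf->address, vmf->page))
		return VM_FAULT_SIGBUS;
	return 0;
}

static const struct dev_pagemap_ops my_pagemap_ops = {
	/* .page_free would hand a freed device-private page back to the driver */
	.migrate_to_ram	= my_migrate_to_ram,
};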
/linux-master/drivers/hwtracing/intel_th/
msu.c
1608 static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
1610 struct msc_iter *iter = vmf->vma->vm_file->private_data;
1613 vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
1614 if (!vmf->page)
1617 get_page(vmf->page);
1618 vmf->page->mapping = vmf->vma->vm_file->f_mapping;
1619 vmf->page->index = vmf
[all...]
/linux-master/sound/usb/usx2y/
usx2yhwdeppcm.c
673 static vm_fault_t snd_usx2y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
678 offset = vmf->pgoff << PAGE_SHIFT;
679 vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->hwdep_pcm_shm + offset;
680 vmf->page = virt_to_page(vaddr);
681 get_page(vmf->page);
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_mman.c
250 static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
252 struct vm_area_struct *area = vmf->vma;
293 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
296 struct vm_area_struct *area = vmf->vma;
311 /* We don't use vmf->pgoff since that has the fake offset */
312 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
/linux-master/drivers/gpu/drm/nouveau/
nouveau_gem.c
42 static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
44 struct vm_area_struct *vma = vmf->vma;
49 ret = ttm_bo_vm_reserve(bo, vmf);
59 ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
61 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
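nouveau here and xe further down follow the TTM template: reserve the buffer object, let the TTM helper populate and prefault the mapping, and unlock the reservation unless the helper returned VM_FAULT_RETRY after already dropping the locks. A condensed sketch of that flow:

#include <drm/ttm/ttm_bo.h>
#include <linux/dma-resv.h>

static vm_fault_t my_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	/* Take the reservation, possibly dropping mmap_lock and asking for a retry. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Fault in up to TTM_BO_VM_NUM_PREFAULT pages around the faulting address. */
	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;		/* helper already dropped the locks for the retry */

	dma_resv_unlock(bo->base.resv);
	return ret;
}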
/linux-master/drivers/uio/
uio.c
670 static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
672 struct uio_device *idev = vmf->vma->vm_private_data;
685 mi = uio_find_mem_index(vmf->vma);
695 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
703 vmf->page = page;
/linux-master/drivers/xen/
privcmd.c
1580 static vm_fault_t privcmd_fault(struct vm_fault *vmf)
1583 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
1584 vmf->pgoff, (void *)vmf->address);
/linux-master/arch/arm/include/asm/
tlbflush.h
625 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
628 static inline void update_mmu_cache_range(struct vm_fault *vmf,
/linux-master/drivers/gpu/drm/xe/
xe_bo.c
1107 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
1109 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
1120 ret = ttm_bo_vm_reserve(tbo, vmf);
1127 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1131 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1134 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
/linux-master/fs/9p/
vfs_file.c
478 v9fs_vm_page_mkwrite(struct vm_fault *vmf)
480 return netfs_page_mkwrite(vmf, NULL);
/linux-master/drivers/scsi/cxlflash/
ocxl_hw.c
1121 * @vmf: VM fault associated with current fault.
1125 static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
1127 struct vm_area_struct *vma = vmf->vma;
1132 offset = vmf->pgoff << PAGE_SHIFT;
1148 return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
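ocxlflash here (and tegra_bo_fault further down, via vmf_insert_page()) never goes through vmf->page at all: it computes the target physical frame and inserts the PTE directly with vmf_insert_pfn(), which already returns a vm_fault_t. A minimal sketch for an MMIO-style mapping; mmio_base is a hypothetical physical base address that the driver stashed at mmap time, and the VMA is assumed to have been set up as a VM_PFNMAP mapping:

static vm_fault_t my_mmio_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	phys_addr_t mmio_base = (phys_addr_t)(uintptr_t)vma->vm_private_data;	/* hypothetical */
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;

	/* Map the physical frame straight into the user PTE; no struct page involved. */
	return vmf_insert_pfn(vma, vmf->address, (mmio_base + offset) >> PAGE_SHIFT);
}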
/linux-master/arch/arm/mm/
fault-armv.c
183 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
/linux-master/arch/nios2/mm/
cacheflush.c
209 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
/linux-master/arch/xtensa/mm/
cache.c
214 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
/linux-master/arch/csky/include/asm/
pgtable.h
266 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
/linux-master/arch/um/include/asm/
pgtable.h
289 #define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)
/linux-master/drivers/gpu/drm/tegra/
gem.c
552 static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
554 struct vm_area_struct *vma = vmf->vma;
563 offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
566 return vmf_insert_page(vma, vmf->address, page);
/linux-master/kernel/
relay.c
33 static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
36 struct rchan_buf *buf = vmf->vma->vm_private_data;
37 pgoff_t pgoff = vmf->pgoff;
46 vmf->page = page;
/linux-master/arch/nios2/include/asm/
pgtable.h
285 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
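The remaining hits (arm, csky, nios2, xtensa, um) are not fault handlers but the architecture hook update_mmu_cache_range(), which the core mm calls after installing nr consecutive PTEs so the architecture can do whatever cache or TLB maintenance the new mapping needs; vmf describes the fault, if any, that caused the update. Purely illustrative, the two forms an architecture can provide (one or the other):

/* Out-of-line form, for architectures that need real maintenance work: */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
			    unsigned long address, pte_t *ptep, unsigned int nr);

/* No-op form, for architectures that need nothing here (as the um header above shows): */
#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)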
