/linux-master/drivers/gpu/drm/etnaviv/

  etnaviv_gem_prime.c
     91  etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, struct vm_area_struct *vma)  [argument]
     96  ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);

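Aside: the etnaviv hit shows the import half of PRIME mmap — instead of mapping pages it owns, the driver forwards the whole vma to the exporting dma-buf. A minimal sketch of that redirect in plain GEM terms; the foo_ name is invented and etnaviv's own object wrapper is elided:

#include <drm/drm_gem.h>
#include <linux/dma-buf.h>

/* Forward mmap() on an imported GEM object to its dma-buf exporter. */
static int foo_gem_prime_mmap_imported(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	/*
	 * dma_buf_mmap() checks the range against the exporter's buffer
	 * size, rebases vma->vm_pgoff to the given page offset (0 here),
	 * and then calls the exporter's own mmap op.
	 */
	return dma_buf_mmap(obj->dma_buf, vma, 0);
}
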
/linux-master/drivers/gpu/drm/omapdrm/

  omap_gem_dmabuf.c
     63  omap_gem_dmabuf_mmap(struct dma_buf *buffer, struct vm_area_struct *vma)  [argument]
     68  return drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);

  omap_gem.c
    353  omap_gem_fault_1d(struct drm_gem_object *obj, struct vm_area_struct *vma, struct vm_fault *vmf)  [argument]
    361  pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
    374  return vmf_insert_mixed(vma, vmf->address,
    379  omap_gem_fault_2d(struct drm_gem_object *obj, struct vm_area_struct *vma, struct vm_fault *vmf)  [argument]
    410  pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
    470  ret = vmf_insert_mixed(vma,
    494  * vma->vm_private_data points to the GEM object that is backing this
    499  struct vm_area_struct *vma = vmf->vma;  [local]
    500  struct drm_gem_object *obj = vma
    534  omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)  [argument]
    [all...]

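Aside: the omap_gem.c hits trace the standard GEM fault-handler shape — compute the page offset of the faulting address relative to vma->vm_start, then install the page with vmf_insert_mixed(). A minimal sketch of that pattern; foo_object, its pfn array, and the bounds check are assumptions, not omapdrm code:

#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical backing object: one pfn per page of the buffer. */
struct foo_object {
	unsigned long *pfns;
	unsigned long npages;
};

static vm_fault_t foo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct foo_object *obj = vma->vm_private_data;
	pgoff_t pgoff;

	/* Page offset of the faulting address within this mapping. */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	if (pgoff >= obj->npages)
		return VM_FAULT_SIGBUS;

	/* Install one pte; returns VM_FAULT_NOPAGE on success. */
	return vmf_insert_mixed(vma, vmf->address,
				__pfn_to_pfn_t(obj->pfns[pgoff], PFN_DEV));
}
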
/linux-master/arch/openrisc/mm/

  tlb.c
     67  * There's no point in checking the vma for PAGE_EXEC to determine whether it's
     83  void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)  [argument]
     96  void local_flush_tlb_range(struct vm_area_struct *vma,  [argument]

/linux-master/drivers/misc/sgi-gru/

  grumain.c
    306  struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,  [argument]
    335  if (vma) {
    337  gts->ts_vma = vma;
    353  * Allocate a vma private data structure.
    355  struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)  [argument]
    373  struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,  [argument]
    376  struct gru_vma_data *vdata = vma->vm_private_data;
    382  gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
    390  struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,  [argument]
    923  struct vm_area_struct *vma = vmf->vma;  [local]
    [all...]

/linux-master/drivers/android/

  binder_alloc.h
     76  * @vma: vm_area_struct passed to mmap_handler
    100  struct vm_area_struct *vma;  [member of struct binder_alloc]
    137  struct vm_area_struct *vma);

/linux-master/include/linux/

  mman.h
    116  * This is called from mmap() and mprotect() with the updated vma->vm_flags.
    192  static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)  [argument]
    200  if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))

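Aside: the mman.h hit is part of the MDWE (memory-deny-write-execute) policy check — given a vma's current flags and the updated vm_flags from mmap()/mprotect(), deny transitions that produce writable-and-executable memory. A condensed sketch of the two denied cases, assuming the MDWE prctl gate has already been tested; an illustration, not the full kernel predicate:

#include <linux/mm.h>

static bool mdwe_denied(const struct vm_area_struct *vma,
			unsigned long vm_flags)
{
	/* Deny mappings that are writable and executable at once. */
	if ((vm_flags & VM_WRITE) && (vm_flags & VM_EXEC))
		return true;

	/* Deny making a previously non-executable mapping executable. */
	if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
		return true;

	return false;
}
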
/linux-master/drivers/tee/

  tee_shm.c
    437  static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)  [argument]
    440  size_t size = vma->vm_end - vma->vm_start;
    447  if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
    450  return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
    451  size, vma->vm_page_prot);

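Aside: tee_shm_fop_mmap() is a textbook mmap handler for a physically contiguous buffer — check that the requested window (vm_pgoff plus vma_pages()) stays inside the buffer, then map the whole range in one remap_pfn_range() call. A minimal sketch of the same shape; the foo_dev structure and its fields are assumptions (unlike the tee code above, this sketch also offsets the pfn base by vm_pgoff so the window semantics are self-contained):

#include <linux/fs.h>
#include <linux/mm.h>

struct foo_dev {
	phys_addr_t paddr;	/* physically contiguous backing store */
	size_t size;		/* bytes, page aligned */
};

static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct foo_dev *foo = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Reject windows that run past the end of the buffer. */
	if (vma->vm_pgoff + vma_pages(vma) > foo->size >> PAGE_SHIFT)
		return -EINVAL;

	/* Map the pfn range in one go; no fault handler needed. */
	return remap_pfn_range(vma, vma->vm_start,
			       (foo->paddr >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
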
/linux-master/drivers/gpu/drm/i915/gt/uc/

  intel_guc.c
    260  offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
    642  * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
    766  struct i915_vma *vma;  [local]
    789  vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
    790  if (IS_ERR(vma))
    793  flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
    794  ret = i915_ggtt_pin(vma, NULL, 0, flags);
    796  vma = ERR_PTR(ret);
    800  return i915_vma_make_unshrinkable(vma);
    804  return vma;
    822  struct i915_vma *vma;  [local]
    [all...]

  intel_guc_slpc.c
     94  GEM_BUG_ON(!slpc->vma);
    164  u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
    250  GEM_BUG_ON(slpc->vma);
    252  err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
    317  u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
    342  GEM_BUG_ON(!slpc->vma);
    353  GEM_BUG_ON(!slpc->vma);
    687  GEM_BUG_ON(!slpc->vma);
    783  GEM_BUG_ON(!slpc->vma);
    810  if (!slpc->vma)
    [all...]

/linux-master/kernel/dma/

  mapping.c
    492  * @vma: vm_area_struct describing requested user mapping
    502  int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,  [argument]
    509  return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
    513  return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
    650  int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,  [argument]
    655  if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
    657  return remap_pfn_range(vma, vma
    754  dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, size_t size, struct sg_table *sgt)  [argument]
    [all...]

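Aside: the dma_mmap_pages() hit shows the canonical window check for partial mappings — the request fails unless vm_pgoff and vma_pages() fit inside the buffer's page count. A driver using the API only forwards the vma; a sketch, with the foo_ structure invented:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical driver state: a buffer obtained from dma_alloc_pages(). */
struct foo_buf {
	struct device *dev;
	struct page *page;
	size_t size;		/* bytes, page aligned */
};

static int foo_buf_mmap(struct foo_buf *buf, struct vm_area_struct *vma)
{
	/*
	 * dma_mmap_pages() itself rejects windows that do not fit:
	 *   vma->vm_pgoff >= count || vma_pages(vma) > count - vm_pgoff
	 * so the caller only has to forward the request.
	 */
	return dma_mmap_pages(buf->dev, vma, buf->size, buf->page);
}
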
/linux-master/drivers/gpu/drm/i915/gem/selftests/

  i915_gem_dmabuf.c
    155  struct i915_vma *vma;  [local]
    179  vma = i915_vma_instance(import_obj, ce->vm, NULL);
    180  if (IS_ERR(vma)) {
    181  err = PTR_ERR(vma);
    185  err = i915_vma_pin(vma, 0, 0, PIN_USER);
    189  err = igt_gpu_fill_dw(ce, vma, 0,
    190  vma->size >> PAGE_SHIFT, 0xdeadbeaf);
    191  i915_vma_unpin(vma);

/linux-master/drivers/gpu/drm/i915/gt/

  selftest_hangcheck.c
    110  struct i915_vma *hws, *vma;  [local]
    135  vma = i915_vma_instance(h->obj, vm, NULL);
    136  if (IS_ERR(vma)) {
    138  return ERR_CAST(vma);
    147  err = i915_vma_pin(vma, 0, 0, PIN_USER);
    163  err = igt_vma_move_to_active_unlocked(vma, rq, 0);
    184  *batch++ = lower_32_bits(i915_vma_offset(vma));
    185  *batch++ = upper_32_bits(i915_vma_offset(vma));
    198  *batch++ = lower_32_bits(i915_vma_offset(vma));
    211  *batch++ = lower_32_bits(i915_vma_offset(vma));
   1375  struct i915_vma *vma;  [member of struct evict_vma]
    [all...]

  intel_ggtt.c
    117  struct i915_vma *vma, *vn;  [local]
    134  list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
    135  struct drm_i915_gem_object *obj = vma->obj;
    137  GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
    139  if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
    153  GEM_WARN_ON(i915_vma_unbind(vma));
    161  if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
    162  i915_vma_wait_for_bind(vma);
    164  __i915_vma_evict(vma, fals
   1035  struct i915_vma *vma, *vn;  [local]
   1558  struct i915_vma *vma;  [local]
    [all...]

/linux-master/mm/

  migrate.c
    183  remove_migration_pte(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *old)  [argument]
    186  DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
    198  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
    212  pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
    226  pte = pte_mkwrite(pte, vma);
    249  struct hstate *h = hstate_vma(vma);
    253  pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
    255  hugetlb_add_anon_rmap(folio, vma, pvmw.address,
    259  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
    265  folio_add_anon_rmap_pte(folio, new, vma,
    341  migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)  [argument]
   2109  struct vm_area_struct *vma;  [local]
   2316  struct vm_area_struct *vma;  [local]
   2583  migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node)  [argument]
    [all...]

  memory-failure.c
    393  static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,  [argument]
    404  VM_BUG_ON_VMA(address == -EFAULT, vma);
    405  pgd = pgd_offset(vma->vm_mm, address);
    440  __add_to_kill(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long addr)  [argument]
    454  tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
    481  add_to_kill_anon_file(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long addr)  [argument]
    487  __add_to_kill(tsk, p, vma, to_kill, addr);
    504  add_to_kill_ksm(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long addr)  [argument]
    509  __add_to_kill(tsk, p, vma, to_kill, addr);
    622  struct vm_area_struct *vma;  [local]
    648  struct vm_area_struct *vma;  [local]
    682  add_to_kill_fsdax(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, pgoff_t pgoff)  [argument]
    697  struct vm_area_struct *vma;  [local]
    [all...]

/linux-master/drivers/media/common/videobuf2/

  videobuf2-dma-contig.c
    274  static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)  [argument]
    285  ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
    288  ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
    295  vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
    296  vma->vm_private_data = &buf->handler;
    297  vma->vm_ops = &vb2_common_vm_ops;
    299  vma->vm_ops->open(vma);
    302  __func__, (unsigned long)buf->dma_addr, vma->vm_start,
    455  vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)  [argument]
    [all...]

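Aside: vb2_dc_mmap() shows what a driver does after dma_mmap_attrs() succeeds — lock the mapping down with vm_flags_set(VM_DONTEXPAND | VM_DONTDUMP), point vm_private_data at its refcounted state, install vm_ops, and call open() once by hand to take the initial reference. A compressed sketch of that sequence; the foo_ names and caller-supplied vm_ops are assumptions:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct foo_buf {
	struct device *dev;
	void *vaddr;		/* from dma_alloc_attrs() */
	dma_addr_t dma_addr;
	size_t size;
};

static int foo_buf_mmap(struct foo_buf *buf, struct vm_area_struct *vma,
			const struct vm_operations_struct *vm_ops)
{
	int ret;

	ret = dma_mmap_attrs(buf->dev, vma, buf->vaddr, buf->dma_addr,
			     buf->size, 0);
	if (ret)
		return ret;

	/* The mapping must not grow via mremap() and is useless in dumps. */
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data = buf;
	vma->vm_ops = vm_ops;

	/* Take the first reference; close() drops it when the vma goes. */
	vma->vm_ops->open(vma);
	return 0;
}
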
/linux-master/drivers/gpu/drm/amd/amdkfd/

  kfd_events.c
   1054  int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)  [argument]
   1062  get_order(vma->vm_end - vma->vm_start)) {
   1077  vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
   1081  pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
   1082  pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
   1084  pr_debug(" vm_flags == 0x%08lX\n", vma->vm_flags);
   1086  vma->vm_end - vma->vm_start);
   1088  page->user_address = (uint64_t __user *)vma
    [all...]

/linux-master/fs/cramfs/

  inode.c
    346  static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)  [argument]
    351  unsigned long address, pgoff = vma->vm_pgoff;
    355  ret = generic_file_readonly_mmap(file, vma);
    360  * Now try to pre-populate ptes for this vma with a direct
    365  bailout_reason = "vma is writable";
    366  if (vma->vm_flags & VM_WRITE)
    373  pages = min(vma_pages(vma), max_pages - pgoff);
    395  if (pages == vma_pages(vma)) {
    397  * The entire vma is mappable. remap_pfn_range() will
    402  ret = remap_pfn_range(vma, vm
    438  cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)  [argument]
    [all...]

/linux-master/arch/s390/include/asm/

  pgtable.h
     50  #define update_mmu_cache(vma, address, ptep) do { } while (0)
     51  #define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)
     52  #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
   1145  static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,  [argument]
   1150  pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
   1155  static inline int ptep_clear_flush_young(struct vm_area_struct *vma,  [argument]
   1158  return ptep_test_and_clear_young(vma, address, ptep);
   1180  static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,  [argument]
   1185  res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
   1187  if (mm_is_protected(vma
   1257  flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)  [argument]
   1280  ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t entry, int dirty)  [argument]
   1711  pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, pmd_t entry, int dirty)  [argument]
   1727  pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp)  [argument]
   1737  pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp)  [argument]
   1767  pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, int full)  [argument]
   1780  pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp)  [argument]
   1787  pmdp_invalidate(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp)  [argument]
   1807  pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp)  [argument]
    [all...]

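Aside: these pgtable.h hits are s390's definitions of the generic pte/pmd accessor hooks. Note ptep_clear_flush_young() collapsing into ptep_test_and_clear_young() (line 1158), since the ptep_xchg_direct() exchange already invalidates the TLB entry, and the update_mmu_cache() family compiling to nothing because the hardware walks page tables directly. How core MM consumes the hook, as a thin illustrative wrapper (the foo_ name is invented):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Illustrative page-aging step: atomically clear the referenced
 * ("young") bit of one pte and report whether the page was touched
 * since the last scan. On s390 this resolves to the
 * ptep_xchg_direct()-based helpers shown in the hits above.
 */
static bool foo_pte_was_referenced(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep)
{
	return ptep_clear_flush_young(vma, addr, ptep);
}
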
/linux-master/drivers/vhost/

  vdpa.c
   1042  struct vm_area_struct *vma;  [local]
   1048  vma = find_vma(dev->mm, uaddr);
   1049  if (!vma) {
   1053  map_size = min(size, vma->vm_end - uaddr);
   1054  if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
   1055  !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
   1063  offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
   1065  map_file->file = get_file(vma
   1481  struct vm_area_struct *vma = vmf->vma;  [local]
   1499  vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)  [argument]
    [all...]

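Aside: the vdpa.c hits resolve a user address by VMA — find_vma() under the mmap lock, a validity filter (file-backed, VM_SHARED, not VM_IO/VM_PFNMAP), then the file offset rebuilt from vm_pgoff plus the offset of uaddr inside the vma. A sketch of that lookup; foo_ naming and the error convention are assumptions, and the caller is presumed to hold mmap_read_lock():

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Resolve a user address to the backing file and file offset.
 * Caller must hold mmap_read_lock(mm). Sketch only.
 */
static struct file *foo_uaddr_to_file(struct mm_struct *mm,
				      unsigned long uaddr, u64 *offset)
{
	struct vm_area_struct *vma = find_vma(mm, uaddr);

	/* find_vma() guarantees vm_end > uaddr, not vm_start <= uaddr. */
	if (!vma || uaddr < vma->vm_start)
		return ERR_PTR(-EINVAL);

	/* Only plain shared file mappings can be re-mapped elsewhere. */
	if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
	      !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
		return ERR_PTR(-EINVAL);

	*offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
	return get_file(vma->vm_file);
}
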
/linux-master/drivers/gpu/drm/nouveau/nvkm/core/

  gpuobj.c
     46  nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset, struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc)  [argument]
     50  return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
    108  nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset, struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc)  [argument]
    113  vmm, vma, argv, argc);

/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

  mem.c
     70  nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc)  [argument]
     79  return nvkm_vmm_map(vmm, vma, argv, argc, &map);
    110  nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc)  [argument]
    119  return nvkm_vmm_map(vmm, vma, argv, argc, &map);

/linux-master/drivers/gpu/drm/lima/

  lima_gem.h
     45  void lima_set_vma_flags(struct vm_area_struct *vma);

/linux-master/arch/nios2/mm/

  tlb.c
    102  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,  [argument]
    105  unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
    113  void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)  [argument]
    115  unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);