/linux-master/include/drm/
drm_gem.h
    178: * Handle mmap() of the gem object, setup vma accordingly.
    184: * used, the @mmap callback must set vma->vm_ops instead.
    186: int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
    478: void drm_gem_vm_open(struct vm_area_struct *vma);
    479: void drm_gem_vm_close(struct vm_area_struct *vma);
    481: struct vm_area_struct *vma);
    482: int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

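Note: the hits above show the GEM @mmap hook. A minimal driver-side sketch of such a hook (hypothetical names; not drm core code) could look like this, installing vm_ops as the comment at line 184 requires:

    #include <linux/mm.h>
    #include <drm/drm_gem.h>

    /* Driver-private vm_ops; .fault/.open/.close are elided in this sketch. */
    static const struct vm_operations_struct my_gem_vm_ops;

    /*
     * Hypothetical @mmap implementation: refuse mappings larger than the
     * backing object, then hand fault handling to driver vm_ops.
     */
    static int my_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
    {
            if (vma->vm_end - vma->vm_start > obj->size)
                    return -EINVAL;

            vma->vm_ops = &my_gem_vm_ops;
            vma->vm_private_data = obj;
            return 0;
    }
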
drm_gem_shmem_helper.h
    109: int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
    251: * @vma: VMA for the area to be mapped
    259: static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) [argument]
    263: return drm_gem_shmem_mmap(shmem, vma);

/linux-master/arch/powerpc/include/asm/nohash/ |
pgtable.h
     72: static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, [argument]
     77: old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
    106: static inline void __ptep_set_access_flags(struct vm_area_struct *vma, [argument]
    115: pte_update(vma->vm_mm, address, ptep, 0, set, huge);
    117: flush_tlb_page(vma, address);

/linux-master/drivers/iommu/ |
iommu-sva.c
    197: struct vm_area_struct *vma; [local]
    211: vma = vma_lookup(mm, prm->addr);
    212: if (!vma)
    232: if (access_flags & ~vma->vm_flags)
    236: ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);

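Note: the hits above are the classic lookup/check/fault sequence for resolving an I/O page fault against a user mm. A self-contained sketch of the same pattern (a sketch, not the iommu-sva code itself; 'addr' and 'wr' would come from the device's fault message):

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    static vm_fault_t resolve_iopf(struct mm_struct *mm, unsigned long addr, bool wr)
    {
            struct vm_area_struct *vma;
            vm_fault_t ret = VM_FAULT_SIGBUS;

            mmap_read_lock(mm);
            vma = vma_lookup(mm, addr);     /* NULL if addr is not mapped */
            if (vma && (!wr || (vma->vm_flags & VM_WRITE)))
                    ret = handle_mm_fault(vma, addr,
                                          FAULT_FLAG_USER |
                                          (wr ? FAULT_FLAG_WRITE : 0),
                                          NULL);
            mmap_read_unlock(mm);
            return ret;
    }
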
/linux-master/drivers/infiniband/core/ |
uverbs_main.c
    690: static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) [argument]
    703: vma->vm_ops = &rdma_umap_ops;
    704: ret = ucontext->device->ops.mmap(ucontext, vma);
    714: static void rdma_umap_open(struct vm_area_struct *vma) [argument]
    716: struct ib_uverbs_file *ufile = vma->vm_file->private_data;
    717: struct rdma_umap_priv *opriv = vma->vm_private_data;
    735: rdma_umap_priv_init(priv, vma, opriv->entry);
    748: vma->vm_private_data = NULL;
    749: zap_vma_ptes(vma, vma
    752: rdma_umap_close(struct vm_area_struct *vma) [argument]
    863: struct vm_area_struct *vma = priv->vma; [local]
    [all...]

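Note: the hits above show the revocable-mapping pattern: vm_ops->open/close keep the driver's view of live vmas in sync (open also runs on fork()), and on device removal the PTEs are zapped so userspace faults instead of touching dead hardware. A hedged sketch with invented names:

    #include <linux/mm.h>

    /* Hypothetical bookkeeping; a real driver links the vma into a list. */
    static void my_track_vma(struct vm_area_struct *vma) { }
    static void my_untrack_vma(struct vm_area_struct *vma) { }

    static void my_umap_open(struct vm_area_struct *vma)
    {
            my_track_vma(vma);      /* also called for the child's copy on fork() */
    }

    static void my_umap_close(struct vm_area_struct *vma)
    {
            my_untrack_vma(vma);
    }

    static const struct vm_operations_struct my_umap_ops = {
            .open  = my_umap_open,
            .close = my_umap_close,
    };

    static void my_revoke_mapping(struct vm_area_struct *vma)
    {
            /* tear down every PTE in the range; later accesses fault */
            zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
    }
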
/linux-master/fs/ |
dax.c
    357: struct vm_area_struct *vma, unsigned long address, bool shared)
    365: index = linear_page_index(vma, address & ~(size - 1));
    856: struct vm_area_struct *vma)
    858: return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
    873: struct address_space *mapping = vmf->vma->vm_file->f_mapping;
    876: bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
    898: dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
    931: struct vm_area_struct *vma; [local]
    993: vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
    994: pfn_mkclean_range(pfn, count, index, vma);
    356: dax_associate_entry(void *entry, struct address_space *mapping, struct vm_area_struct *vma, unsigned long address, bool shared) [argument]
    855: dax_fault_is_synchronous(const struct iomap_iter *iter, struct vm_area_struct *vma) [argument]
    1207: struct vm_area_struct *vma = vmf->vma; [local]
    [all...]

/linux-master/drivers/gpu/drm/i915/gt/ |
intel_lrc.c
     870: const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
     879: i915_ggtt_offset(wa_ctx->vma) +
    1074: struct i915_vma *vma; [local]
    1109: vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
    1110: if (IS_ERR(vma)) {
    1112: return vma;
    1115: return vma;
    1129: struct i915_vma *vma; [local]
    1134: vma = __lrc_alloc_state(ce, engine);
    1135: if (IS_ERR(vma))
    1804: struct i915_vma *vma; [local]
    [all...]

/linux-master/arch/x86/include/asm/ |
mmu_context.h
    249: static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, [argument]
    256: if (foreign || vma_is_foreign(vma))
    258: return __pkru_allows_pkey(vma_pkey(vma), write);

/linux-master/include/linux/ |
migrate.h
    145: int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
    149: struct vm_area_struct *vma, int node)
    186: struct vm_area_struct *vma; [member in struct migrate_vma]
    148: migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) [argument]

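Note: the pairing of a real prototype (line 145) with a static inline stub (lines 148-149) is the usual config-gated header idiom, so callers need no #ifdef. A generic sketch with invented names (CONFIG_MY_FEATURE, my_op):

    #include <linux/errno.h>

    struct vm_area_struct;

    #ifdef CONFIG_MY_FEATURE
    int my_op(struct vm_area_struct *vma, int node);
    #else
    static inline int my_op(struct vm_area_struct *vma, int node)
    {
            return -EAGAIN;         /* feature compiled out */
    }
    #endif
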
/linux-master/arch/powerpc/include/asm/nohash/32/ |
pte-8xx.h
    132: static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, [argument]
    139: pte_update(vma->vm_mm, address, ptep, clr, set, huge);
    141: flush_tlb_page(vma, address);

/linux-master/drivers/misc/uacce/ |
uacce.c
    203: static void uacce_vma_close(struct vm_area_struct *vma) [argument]
    205: struct uacce_queue *q = vma->vm_private_data;
    207: if (vma->vm_pgoff < UACCE_MAX_REGION) {
    208: struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];
    211: q->qfrs[vma->vm_pgoff] = NULL;
    221: static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) [argument]
    229: if (vma->vm_pgoff < UACCE_MAX_REGION)
    230: type = vma->vm_pgoff;
    238: vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
    239: vma
    [all...]

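Note: the hits above use vm_pgoff as a region selector: userspace chooses the region via the offset argument of mmap(2), and the driver nails down vma behaviour with vm_flags_set() before accepting the mapping. A hedged sketch (MY_MAX_REGION and all my_* names are invented):

    #include <linux/mm.h>

    #define MY_MAX_REGION   2       /* number of mappable regions, invented */

    /* Per-region vm_ops; .fault/.close are elided in this sketch. */
    static const struct vm_operations_struct my_region_vm_ops;

    static int my_mmap(struct file *filep, struct vm_area_struct *vma)
    {
            if (vma->vm_pgoff >= MY_MAX_REGION)
                    return -EINVAL;

            /* never inherited across fork(), never grown by mremap() */
            vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
            vma->vm_ops = &my_region_vm_ops;
            vma->vm_private_data = filep->private_data;
            return 0;
    }
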
/linux-master/mm/ |
swap.h
     46: struct vm_area_struct *vma, unsigned long addr);
     51: struct vm_area_struct *vma, unsigned long addr,
    106: struct vm_area_struct *vma, unsigned long addr)
    105: swap_cache_get_folio(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr) [argument]

interval_tree.c
     63: return vma_start_pgoff(avc->vma);
     68: return vma_last_pgoff(avc->vma);

/linux-master/arch/powerpc/kvm/ |
e500_mmu_host.c
    357: struct vm_area_struct *vma; [local]
    360: vma = find_vma(kvm->mm, hva);
    361: if (vma && hva >= vma->vm_start &&
    362: (vma->vm_flags & VM_PFNMAP)) {
    367: * vma and the memslot.
    375: start = vma->vm_pgoff;
    377: vma_pages(vma);
    379: pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
    424: } else if (vma
    [all...]

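Note: line 379 above is the standard pfn arithmetic for VM_PFNMAP vmas: the mapping is linear, so the pfn backing a user address follows from vm_pgoff alone, with no page-table walk. A minimal sketch of the same calculation (caller must hold mmap_lock and have checked that hva lies inside the vma):

    #include <linux/mm.h>

    static bool pfnmap_hva_to_pfn(struct vm_area_struct *vma, unsigned long hva,
                                  unsigned long *pfn)
    {
            if (!(vma->vm_flags & VM_PFNMAP))
                    return false;   /* backed by struct pages instead */

            *pfn = vma->vm_pgoff + ((hva - vma->vm_start) >> PAGE_SHIFT);
            return true;
    }
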
/linux-master/arch/parisc/mm/ |
hugetlbpage.c
     47: pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, [argument]
    170: int huge_ptep_set_access_flags(struct vm_area_struct *vma, [argument]
    175: struct mm_struct *mm = vma->vm_mm;

/linux-master/lib/ |
buildid.c
    119: * Parse build ID of ELF file mapped to vma
    120: * @vma: vma object
    126: int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, [argument]
    135: if (!vma->vm_file)
    138: page = find_get_page(vma->vm_file->f_mapping, 0);

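Note: lines 135 and 138 above show the opening moves of build_id_parse(): for a file-backed vma, peek at page 0 of the backing file in the page cache. A sketch of just that step (the caller must put_page() a non-NULL result):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static struct page *first_mapped_page(struct vm_area_struct *vma)
    {
            if (!vma->vm_file)      /* anonymous mapping: no ELF header to read */
                    return NULL;

            /* NULL if the first page is not resident; no I/O is started */
            return find_get_page(vma->vm_file->f_mapping, 0);
    }
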
test_hmm.c
     688: (!spage && args->vma->vm_flags & VM_WRITE))
     870: dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
     906: struct vm_area_struct *vma; [local]
     925: vma = vma_lookup(mm, addr);
     926: if (!vma || !(vma->vm_flags & VM_READ)) {
     931: if (next > vma->vm_end)
     932: next = vma->vm_end;
     934: args.vma = vma;
     966: struct vm_area_struct *vma; [local]
    1370: dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma) [argument]
    [all...]

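Note: lines 925-934 above are a chunked walk: cover a user range one vma at a time, clamping each chunk to the vma that maps it, and bail out on holes or unreadable vmas. A hedged sketch (process_chunk() is invented; the real code fills migrate/snapshot args instead; caller holds mmap_lock for read):

    #include <linux/mm.h>
    #include <linux/minmax.h>

    /* Hypothetical per-chunk work. */
    static void process_chunk(struct vm_area_struct *vma, unsigned long start,
                              unsigned long end)
    {
    }

    static int walk_readable_range(struct mm_struct *mm, unsigned long start,
                                   unsigned long end)
    {
            unsigned long addr, next;
            struct vm_area_struct *vma;

            for (addr = start; addr < end; addr = next) {
                    vma = vma_lookup(mm, addr);     /* exact cover, no gap skip */
                    if (!vma || !(vma->vm_flags & VM_READ))
                            return -EINVAL;
                    next = min(end, vma->vm_end);
                    process_chunk(vma, addr, next);
            }
            return 0;
    }
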
/linux-master/drivers/gpu/drm/i915/gem/ |
i915_gem_dmabuf.c
     94: static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) [argument]
    100: if (obj->base.size < vma->vm_end - vma->vm_start)
    104: return drm_gem_prime_mmap(&obj->base, vma);
    109: ret = call_mmap(obj->base.filp, vma);
    113: vma_set_file(vma, obj->base.filp);

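Note: lines 109 and 113 above show the mmap-forwarding pattern: run the backing file's ->mmap() and then re-attribute the vma to that file. A sketch of the pattern ('backing' stands in for obj->base.filp; the size check at line 100 is assumed to have happened already):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int forward_mmap(struct file *backing, struct vm_area_struct *vma)
    {
            int ret;

            ret = call_mmap(backing, vma);  /* may replace vm_ops, flags, ... */
            if (ret)
                    return ret;

            /* swap vma->vm_file to 'backing', with correct get/fput pairing */
            vma_set_file(vma, backing);
            return 0;
    }
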
i915_gem_userptr.c
    172: * just in case. However, if we set the vma as being read-only we know
    404: struct vm_area_struct *vma; [local]
    408: for_each_vma_range(vmi, vma, end) {
    410: if (vma->vm_start > addr)
    413: if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
    416: addr = vma->vm_end;
    420: if (vma || addr < end)

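Note: the loop above probes whether a user range is fully covered by "ordinary" vmas. A self-contained sketch of the same check with the VMA iterator (caller holds mmap_lock for read; returns 0 if the range is usable):

    #include <linux/mm.h>

    static int check_user_range(struct mm_struct *mm, unsigned long addr,
                                unsigned long end)
    {
            struct vm_area_struct *vma;
            VMA_ITERATOR(vmi, mm, addr);

            for_each_vma_range(vmi, vma, end) {
                    if (vma->vm_start > addr)       /* hole before this vma */
                            return -EFAULT;
                    if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                            return -EFAULT;         /* no struct pages here */
                    addr = vma->vm_end;
            }
            return addr < end ? -EFAULT : 0;        /* hole at the tail? */
    }
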
/linux-master/sound/soc/loongson/ |
loongson_dma.c
    303: struct vm_area_struct *vma)
    305: return remap_pfn_range(vma, vma->vm_start,
    307: vma->vm_end - vma->vm_start, vma->vm_page_prot);
    301: loongson_pcm_mmap(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct vm_area_struct *vma) [argument]

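Note: the hits above are the one-call mapping style: populate every PTE at mmap() time with remap_pfn_range(), so no fault handler is needed afterwards. A hedged sketch (my_buf_phys/my_buf_size are invented driver state standing in for a physically contiguous DMA buffer):

    #include <linux/mm.h>

    static phys_addr_t my_buf_phys;
    static size_t my_buf_size;

    static int my_pcm_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            unsigned long len = vma->vm_end - vma->vm_start;

            if (len > my_buf_size)
                    return -EINVAL;

            return remap_pfn_range(vma, vma->vm_start,
                                   my_buf_phys >> PAGE_SHIFT,   /* first pfn */
                                   len, vma->vm_page_prot);
    }
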
/linux-master/drivers/virt/acrn/ |
mm.c
    166: struct vm_area_struct *vma; [local]
    177: vma = vma_lookup(current->mm, memmap->vma_base);
    178: if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
    184: if ((memmap->vma_base + memmap->len) > vma->vm_end) {
    190: ret = follow_pte(vma, memmap->vma_base + i * PAGE_SIZE,

/linux-master/fs/ext2/ |
file.c
     95: struct inode *inode = file_inode(vmf->vma->vm_file);
     98: (vmf->vma->vm_flags & VM_SHARED);
    102: file_update_time(vmf->vma->vm_file);
    125: static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) [argument]
    128: return generic_file_mmap(file, vma);
    131: vma->vm_ops = &ext2_dax_vm_ops;

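Note: lines 125-131 above dispatch on whether the inode is DAX: DAX files bypass the page cache and need their own vm_ops, everything else takes the stock path. A sketch of the dispatch (my_dax_vm_ops stands in for ext2_dax_vm_ops; its .fault/.huge_fault handlers are elided):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static const struct vm_operations_struct my_dax_vm_ops;

    static int my_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            if (!IS_DAX(file_inode(file)))
                    return generic_file_mmap(file, vma);

            file_accessed(file);
            vma->vm_ops = &my_dax_vm_ops;
            return 0;
    }
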
/linux-master/arch/sparc/kernel/ |
adi_64.c
     22: * further allocations for same vma.
    125: struct vm_area_struct *vma,
    131: /* Check if this vma already has tag storage descriptor
    157: struct vm_area_struct *vma,
    171: /* Check if this vma already has tag storage descriptor
    227: /* Tag storage has not been allocated for this vma and space
    231: * store tags for as many pages in this vma as possible but not
    327: void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma, [argument]
    338: tag_desc = find_tag_store(mm, vma, addr);
    370: int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma, [argument]
    124: find_tag_store(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) [argument]
    156: alloc_tag_store(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) [argument]
    [all...]

/linux-master/arch/x86/kernel/cpu/sgx/ |
encl.h
     90: struct vm_area_struct **vma)
     98: *vma = result;
     89: sgx_encl_find(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **vma) [argument]

/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/acr/ |
gm20b.c
     53: .code_dma_base = fw->vma->addr >> 8,
     59: .data_dma_base = (fw->vma->addr + fw->dmem_base_img) >> 8,
