Lines matching references to vma (the source line number precedes each fragment)

30 __vma_matches(struct vm_area_struct *vma, struct file *filp,
33 if (vma->vm_file != filp)
36 return vma->vm_start == addr &&
37 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
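
Taken together, fragments 30-37 form one small helper. A minimal reconstruction, assuming the parameter names follow the usual i915 convention, looks like this:

	static bool
	__vma_matches(struct vm_area_struct *vma, struct file *filp,
		      unsigned long addr, unsigned long size)
	{
		if (vma->vm_file != filp)
			return false;

		/* The vma must cover exactly the requested, page-aligned range. */
		return vma->vm_start == addr &&
		       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
	}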
107 struct vm_area_struct *vma;
113 vma = find_vma(mm, addr);
114 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
115 vma->vm_page_prot =
116 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
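
Fragments 107-116 sit in the mmap ioctl path: after the mapping has been placed, the driver looks the vma back up and retrofits write-combining onto it. A sketch of the surrounding pattern, assuming the locking and error handling take the usual form:

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vma = find_vma(mm, addr);
	if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		addr = -ENOMEM;	/* the mapping raced away underneath us */

	mmap_write_unlock(mm);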
252 struct vm_area_struct *area = vmf->vma;
296 struct vm_area_struct *area = vmf->vma;
306 struct i915_vma *vma;
339 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
343 if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
358 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
359 if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
362 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
370 if (vma == ERR_PTR(-ENOSPC)) {
378 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
381 if (IS_ERR(vma)) {
382 ret = PTR_ERR(vma);
401 ret = i915_vma_pin_fence(vma);
407 area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
408 (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
409 min_t(u64, vma->size, area->vm_end - area->vm_start),
418 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
422 /* Track the mmo associated with the fenced vma */
423 vma->mmo = mmo;
431 i915_vma_set_ggtt_write(vma);
436 i915_vma_unpin_fence(vma);
438 __i915_vma_unpin(vma);
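
Fragments 339-378 show the pin ladder in the GTT fault handler: first try to pin the whole object into the mappable aperture, then fall back to a partial view when the aperture is too full (-ENOSPC), retrying the whole transaction on -EDEADLK. A condensed sketch; compute_partial_view(), MIN_CHUNK_PAGES and the pin flags are assumptions inferred from the fragments:

	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE | PIN_NONBLOCK);
	if (vma == ERR_PTR(-ENOSPC)) {
		/* Aperture too crowded for the whole object: map only a
		 * chunk around the faulting page instead. */
		struct i915_gtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
	}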
508 struct i915_vma *vma;
512 for_each_ggtt_vma(vma, obj)
513 i915_vma_revoke_mmap(vma);
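
Fragments 508-513 are the revocation side: walk every GGTT binding of the object and zap the userspace PTEs, so the next CPU access faults back in through vm_fault_gtt(). In sketch form:

	struct i915_vma *vma;

	/* Tear down all user mappings of the object's GGTT bindings;
	 * subsequent accesses refault and re-pin on demand. */
	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);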
868 static void vm_open(struct vm_area_struct *vma)
870 struct i915_mmap_offset *mmo = vma->vm_private_data;
877 static void vm_close(struct vm_area_struct *vma)
879 struct i915_mmap_offset *mmo = vma->vm_private_data;
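
vm_open()/vm_close() (868-879) keep the GEM object alive for the lifetime of each mapping. A plausible reconstruction of both hooks:

	static void vm_open(struct vm_area_struct *vma)
	{
		struct i915_mmap_offset *mmo = vma->vm_private_data;
		struct drm_i915_gem_object *obj = mmo->obj;

		GEM_BUG_ON(!obj);
		i915_gem_object_get(obj);	/* each mapping pins the object */
	}

	static void vm_close(struct vm_area_struct *vma)
	{
		struct i915_mmap_offset *mmo = vma->vm_private_data;
		struct drm_i915_gem_object *obj = mmo->obj;

		GEM_BUG_ON(!obj);
		i915_gem_object_put(obj);	/* drop the vm_open() reference */
	}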
939 struct vm_area_struct *vma)
946 if (vma->vm_flags & VM_WRITE) {
950 vm_flags_clear(vma, VM_MAYWRITE);
959 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
963 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
969 vma_set_file(vma, anon);
970 /* Drop the initial creation reference, the vma is now holding one. */
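
Fragments 939-970 prepare the vma: refuse writable mappings of read-only objects, mark the mapping as a raw PFN map, and swap in a private anonymous file so the mapping hangs off its own f_mapping and can be revoked without touching the device file. A sketch, assuming the usual error-path details:

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE)
			return -EINVAL;		/* no writable maps of RO objects */
		vm_flags_clear(vma, VM_MAYWRITE);	/* and no mprotect() upgrade */
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);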
974 vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
975 vma->vm_ops = obj->ops->mmap_ops;
976 vma->vm_private_data = obj->base.vma_node.driver_private;
980 vma->vm_private_data = mmo;
984 vma->vm_page_prot =
985 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
986 vma->vm_ops = &vm_ops_cpu;
993 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
994 vma->vm_ops = &vm_ops_cpu;
998 vma->vm_page_prot =
999 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
1000 vma->vm_ops = &vm_ops_cpu;
1004 vma->vm_page_prot =
1005 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1006 vma->vm_ops = &vm_ops_gtt;
1009 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
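
The four vm_page_prot/vm_ops pairs at 984-1006 correspond to the object's mmap type. Condensed into the switch they presumably come from (case names taken from the i915_mmap_type enum; further cases such as I915_MMAP_TYPE_FIXED are omitted here):

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:		/* write-combined CPU mapping */
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;
	case I915_MMAP_TYPE_WB:		/* cached CPU mapping */
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;
	case I915_MMAP_TYPE_UC:		/* uncached CPU mapping */
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;
	case I915_MMAP_TYPE_GTT:	/* through the GGTT aperture */
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);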
1016 * drm_gem_object as the vma->vm_private_data. Since we need to
1020 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1034 vma->vm_pgoff,
1035 vma_pages(vma));
1039 * destroyed and will be invalid when the vma manager lock
1060 return i915_gem_object_mmap(obj, mmo, vma);
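
i915_gem_mmap() (1020-1060) has to translate vm_pgoff back into an object. A sketch of the lookup implied by fragments 1034-1039, done under the vma-offset-manager lock so a concurrently destroyed node cannot be used:

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();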
1063 int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
1080 vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
1096 return i915_gem_object_mmap(obj, mmo, vma);
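
From userspace, all of the above is reached through the MMAP_OFFSET ioctl followed by a plain mmap() on the drm fd. A hedged example; map_wc() is a hypothetical helper name:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/i915_drm.h>

	/* Hypothetical helper: map a GEM object write-combined. The offset
	 * returned by the ioctl is what i915_gem_mmap() resolves back to
	 * the object via the vma-offset lookup sketched above. */
	static void *map_wc(int drm_fd, uint32_t handle, size_t size)
	{
		struct drm_i915_gem_mmap_offset arg = {
			.handle = handle,
			.flags = I915_MMAP_OFFSET_WC,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
			return MAP_FAILED;

		return mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, drm_fd, arg.offset);
	}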