Lines matching refs:vma

289 struct vm_area_struct *vma = vmf->vma;
290 struct drm_gem_object *obj = vma->vm_private_data;
321 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
328 ret = vmf_insert_pfn(vma, vmf->address, pfn);
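
The four matches above (file lines 289-328) are from the GEM page-fault handler: the faulting userspace address is turned into a page offset within the object, and the matching pfn is inserted into the VM_PFNMAP mapping. A minimal sketch of that pattern follows; sketch_get_pages() is a placeholder for the driver's own pinned-page lookup, not a function in this file, and locking is omitted.

#include <linux/err.h>
#include <linux/mm.h>
#include <drm/drm_gem.h>

/* Placeholder for the driver's pinned-page lookup; purely illustrative. */
static struct page **sketch_get_pages(struct drm_gem_object *obj);

static vm_fault_t sketch_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;

	pages = sketch_get_pages(obj);
	if (IS_ERR(pages))
		return vmf_error(PTR_ERR(pages));

	/* page offset of the faulting address within the userspace mapping */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	pfn = page_to_pfn(pages[pgoff]);

	/* insert a single pfn; VM_PFNMAP mappings are populated this way */
	return vmf_insert_pfn(vma, vmf->address, pfn);
}
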
369 struct msm_gem_vma *vma;
373 vma = msm_gem_vma_new(aspace);
374 if (!vma)
377 list_add_tail(&vma->list, &msm_obj->vmas);
379 return vma;
386 struct msm_gem_vma *vma;
390 list_for_each_entry(vma, &msm_obj->vmas, list) {
391 if (vma->aspace == aspace)
392 return vma;
398 static void del_vma(struct msm_gem_vma *vma)
400 if (!vma)
403 list_del(&vma->list);
404 kfree(vma);
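
File lines 369-404 are the object's per-address-space VMA bookkeeping: add_vma() allocates an msm_gem_vma and links it onto the object's vmas list, lookup_vma() walks that list comparing address spaces, and del_vma() unlinks and frees an entry. A sketch of the lookup and delete halves, following the matched lines; the locking asserts of the real functions are not visible here and are left out (to_msm_bo() is the helper seen at line 1213).

#include <linux/list.h>
#include <linux/slab.h>
#include "msm_gem.h"	/* struct msm_gem_object, struct msm_gem_vma */

static struct msm_gem_vma *sketch_lookup_vma(struct drm_gem_object *obj,
					     struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	/* at most one msm_gem_vma per address space, kept on a plain list */
	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void sketch_del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
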
417 struct msm_gem_vma *vma;
421 list_for_each_entry(vma, &msm_obj->vmas, list) {
422 if (vma->aspace) {
423 msm_gem_vma_purge(vma);
425 msm_gem_vma_close(vma);
435 struct msm_gem_vma *vma, *tmp;
439 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
440 del_vma(vma);
448 struct msm_gem_vma *vma;
452 vma = lookup_vma(obj, aspace);
454 if (!vma) {
457 vma = add_vma(obj, aspace);
458 if (IS_ERR(vma))
459 return vma;
461 ret = msm_gem_vma_init(vma, obj->size,
464 del_vma(vma);
468 GEM_WARN_ON(vma->iova < range_start);
469 GEM_WARN_ON((vma->iova + obj->size) > range_end);
472 return vma;
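
get_vma_locked() (file lines 448-472) is a lookup-or-create: reuse the existing vma for this address space, otherwise add one and initialize it for the requested iova window, undoing the add with del_vma() if initialization fails, then sanity-check the result against the window. Line 461 is cut after obj->size, so the range arguments passed to msm_gem_vma_init() in this sketch are an assumption; error handling and locking asserts are simplified.

static struct msm_gem_vma *sketch_get_vma(struct drm_gem_object *obj,
					  struct msm_gem_address_space *aspace,
					  u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	vma = lookup_vma(obj, aspace);
	if (!vma) {
		int ret;

		/* no mapping in this address space yet: create and link one */
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		/* carve an iova range out of the address space
		 * (range arguments assumed, see note above) */
		ret = msm_gem_vma_init(vma, obj->size, range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	}

	/* the allocation must land inside the caller's requested window */
	GEM_WARN_ON(vma->iova < range_start);
	GEM_WARN_ON((vma->iova + obj->size) > range_end);

	return vma;
}
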
475 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
496 return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
538 struct msm_gem_vma *vma;
543 vma = get_vma_locked(obj, aspace, range_start, range_end);
544 if (IS_ERR(vma))
545 return PTR_ERR(vma);
547 ret = msm_gem_pin_vma_locked(obj, vma);
549 *iova = vma->iova;
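
File lines 538-549 combine the two previous steps: get (or create) the vma for the requested range, pin its backing pages into the address space, and hand the resulting iova back through the out parameter. A condensed sketch; the unpin-on-error cleanup of the real function is not visible in the matches and is omitted.

static int sketch_get_and_pin_iova(struct drm_gem_object *obj,
				   struct msm_gem_address_space *aspace,
				   uint64_t *iova,
				   u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* pin the backing pages and map them into the address space */
	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}
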
587 struct msm_gem_vma *vma;
591 vma = get_vma_locked(obj, aspace, 0, U64_MAX);
592 if (IS_ERR(vma)) {
593 ret = PTR_ERR(vma);
595 *iova = vma->iova;
605 struct msm_gem_vma *vma = lookup_vma(obj, aspace);
607 if (!vma)
610 msm_gem_vma_purge(vma);
611 msm_gem_vma_close(vma);
612 del_vma(vma);
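
File lines 605-612 are the teardown of a single mapping: look up the vma for the address space, purge the page-table mapping, close the iova allocation, then drop the bookkeeping entry. Going by the function names, purge unmaps while close returns the iova range to the allocator; as a sketch:

static void sketch_clear_iova(struct drm_gem_object *obj,
			      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return;

	msm_gem_vma_purge(vma);		/* unmap from the MMU */
	msm_gem_vma_close(vma);		/* give the iova range back */
	del_vma(vma);			/* unlink and free the entry */
}
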
622 * Setting an iova of zero will clear the vma.
633 struct msm_gem_vma *vma;
634 vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
635 if (IS_ERR(vma)) {
636 ret = PTR_ERR(vma);
637 } else if (GEM_WARN_ON(vma->iova != iova)) {
655 struct msm_gem_vma *vma;
658 vma = lookup_vma(obj, aspace);
659 if (!GEM_WARN_ON(!vma)) {
927 struct msm_gem_vma *vma;
974 list_for_each_entry(vma, &msm_obj->vmas, list) {
976 if (vma->aspace) {
977 struct msm_gem_address_space *aspace = vma->aspace;
992 vma->aspace, vma->iova,
993 vma->mapped ? "mapped" : "unmapped");
1065 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1069 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1070 vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
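
File lines 1065-1070 set up the userspace mapping: VM_PFNMAP (pages are inserted one at a time by the fault handler sketched earlier), the usual VM_DONTEXPAND | VM_DONTDUMP, and a page protection chosen per object by msm_gem_pgprot(). In the sketch below, pgprot_writecombine() stands in for that per-object decision; the real helper also handles cached objects.

static int sketch_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	/* faulted in page by page via vmf_insert_pfn(), never expanded or dumped */
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	/* write-combined here; msm_gem_pgprot() picks WC vs. cached per object */
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}
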
1199 struct msm_gem_vma *vma;
1206 vma = add_vma(obj, NULL);
1208 if (IS_ERR(vma)) {
1209 ret = PTR_ERR(vma);
1213 to_msm_bo(obj)->vram_node = &vma->node;
1223 vma->iova = physaddr(obj);
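
The last group (file lines 1199-1223) is the VRAM-carveout path used when there is no IOMMU: a vma is added with a NULL address space purely so its drm_mm node can hold the carveout allocation, and the "iova" ends up being the buffer's physical address. A sketch of that sequence; the allocation of backing pages from the carveout is elided, and physaddr() and vram_node are the driver's own names from the matches above.

static int sketch_init_vram_vma(struct drm_gem_object *obj)
{
	struct msm_gem_vma *vma;

	/* NULL aspace: this vma only carries the drm_mm node for the carveout */
	vma = add_vma(obj, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	to_msm_bo(obj)->vram_node = &vma->node;

	/* ... backing pages are allocated from the VRAM carveout here ... */

	/* with no IOMMU, the device address is simply the physical address */
	vma->iova = physaddr(obj);

	return 0;
}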