Lines matching defs:obj (drivers/gpu/drm/i915/gem/i915_gem_mman.c)

66 struct drm_i915_gem_object *obj;
82 obj = i915_gem_object_lookup(file, args->handle);
83 if (!obj)
89 if (!obj->base.filp) {
94 if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
99 addr = vm_mmap(obj->base.filp, 0, args->size,
114 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
123 i915_gem_object_put(obj);
129 i915_gem_object_put(obj);
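
Lines 66-129 together form the legacy mmap ioctl: resolve the handle, refuse objects with no shmem filp behind them (dma-buf imports, line 89), bounds-check the requested range (94), then let vm_mmap() do the real work (99). Line 114's __vma_matches() re-validates the resulting vma under the mmap lock before the I915_MMAP_WC variant rewrites its page protection. A condensed sketch of the main path, reconstructed from the fragments above; the error labels, the PROT_/MAP_ flags and the addr_ptr write-back are assumptions from memory, not shown in the listing:

    struct drm_i915_gem_object *obj;
    unsigned long addr;

    obj = i915_gem_object_lookup(file, args->handle);
    if (!obj)
        return -ENOENT;

    /* dma-buf imports have no backing filp to mmap pages from */
    if (!obj->base.filp) {
        addr = -ENXIO;
        goto err;
    }

    /* reject offset+size ranges that wrap or exceed the object */
    if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
        addr = -EINVAL;
        goto err;
    }

    addr = vm_mmap(obj->base.filp, 0, args->size,
                   PROT_READ | PROT_WRITE, MAP_SHARED,
                   args->offset);
    if (IS_ERR_VALUE(addr))
        goto err;

    i915_gem_object_put(obj);       /* line 123: success path */
    args->addr_ptr = (u64)addr;
    return 0;

err:
    i915_gem_object_put(obj);       /* line 129: error path */
    return addr;
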
133 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
135 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
198 compute_partial_view(const struct drm_i915_gem_object *obj,
204 if (i915_gem_object_is_tiled(obj))
205 chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
211 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
214 if (chunk >= obj->base.size >> PAGE_SHIFT)
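
Lines 133-214 cover partial GGTT views: when an object does not fit into the mappable aperture in one go, the fault handler binds only a chunk around the faulting page, with tile_row_pages() (133-135) converting the tile row size into pages so tiled objects are always mapped in whole rows. A sketch of compute_partial_view() assembled from the fragments above; the view.type/partial field names follow recent kernels (older trees spell the constants I915_GGTT_VIEW_*):

    static struct i915_gtt_view
    compute_partial_view(const struct drm_i915_gem_object *obj,
                         pgoff_t page_offset, unsigned int chunk)
    {
        struct i915_gtt_view view;

        /* tiled objects must be mapped in whole tile rows */
        if (i915_gem_object_is_tiled(obj))
            chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

        view.type = I915_GTT_VIEW_PARTIAL;
        view.partial.offset = rounddown(page_offset, chunk);
        view.partial.size =
            min_t(unsigned int, chunk,
                  (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

        /* if the chunk covers the whole object, use a normal view */
        if (chunk >= obj->base.size >> PAGE_SHIFT)
            view.type = I915_GTT_VIEW_NORMAL;

        return view;
    }
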
254 struct drm_i915_gem_object *obj = mmo->obj;
259 if (unlikely(i915_gem_object_is_readonly(obj) &&
263 if (i915_gem_object_lock_interruptible(obj, NULL))
266 err = i915_gem_object_pin_pages(obj);
271 if (!i915_gem_object_has_struct_page(obj)) {
272 iomap = obj->mm.region->iomap.base;
273 iomap -= obj->mm.region->region.start;
276 /* PTEs are revoked in obj->ops->put_pages() */
279 obj->mm.pages->sgl, iomap);
282 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
283 obj->mm.dirty = true;
286 i915_gem_object_unpin_pages(obj);
289 i915_gem_object_unlock(obj);
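
Lines 254-289 are the CPU (WC/WB) fault handler: reject writes to read-only objects, take the object lock, pin the backing pages, and insert PTEs for the whole vma in one shot. A sketch under the assumption that the PTE-insertion helper is remap_io_sg() and the errno translator is i915_error_to_vmf_fault(); only the iomap arithmetic and the calls at 263-289 are confirmed by the listing:

    static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
    {
        struct vm_area_struct *area = vmf->vma;
        struct i915_mmap_offset *mmo = area->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;
        resource_size_t iomap;
        int err;

        /* sanity check that we allow writing into this object */
        if (unlikely(i915_gem_object_is_readonly(obj) &&
                     area->vm_flags & VM_WRITE))
            return VM_FAULT_SIGBUS;

        if (i915_gem_object_lock_interruptible(obj, NULL))
            return VM_FAULT_NOPAGE;

        err = i915_gem_object_pin_pages(obj);
        if (err)
            goto out;

        iomap = -1;
        if (!i915_gem_object_has_struct_page(obj)) {
            /* io memory: rebase sg DMA addresses onto the BAR */
            iomap = obj->mm.region->iomap.base;
            iomap -= obj->mm.region->region.start;
        }

        /* PTEs are revoked in obj->ops->put_pages() */
        err = remap_io_sg(area, area->vm_start,
                          area->vm_end - area->vm_start,
                          obj->mm.pages->sgl, iomap);

        if (area->vm_flags & VM_WRITE) {
            GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
            obj->mm.dirty = true;
        }

        i915_gem_object_unpin_pages(obj);
    out:
        i915_gem_object_unlock(obj);
        return i915_error_to_vmf_fault(err);
    }
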
298 struct drm_i915_gem_object *obj = mmo->obj;
299 struct drm_device *dev = obj->base.dev;
314 trace_i915_gem_object_fault(obj, page_offset, true, write);
320 ret = i915_gem_object_lock(obj, &ww);
325 if (i915_gem_object_is_readonly(obj) && write) {
330 ret = i915_gem_object_pin_pages(obj);
339 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
346 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
358 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
362 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
378 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
395 if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
418 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
419 list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
430 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
432 obj->mm.dirty = true;
442 i915_gem_object_unpin_pages(obj);
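
Lines 298-442 are the much longer GTT fault handler. After the trace hook (314), the ww lock (320), the readonly check (325) and pinning the pages (330), the four i915_gem_object_ggtt_pin_ww() calls at 339, 358, 362 and 378 form a fallback ladder; line 395 then rejects snoopable objects, since GTT access to them is incoherent on non-LLC platforms, 418-419 put the object on the GGTT userfault list so its PTEs can be revoked on suspend, and 430-432 mark the pages dirty before unpinning (442). A condensed sketch of just the pinning ladder; the flag choices are from memory, and the ww retry loop, vm.mutex handling, fencing and error unwinding are omitted:

    /* 339: try to pin the whole object into the mappable aperture */
    vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                      PIN_MAPPABLE | PIN_NONBLOCK | PIN_NOEVICT);
    if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
        /* 346: fall back to a chunk around the faulting page */
        struct i915_gtt_view view =
            compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
        unsigned int flags = PIN_MAPPABLE | PIN_NOSEARCH;

        /* 358: cheap attempt first; 362: then allow eviction scans */
        vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
        if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
            flags = PIN_MAPPABLE;
            vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
        }

        /* 378: last resort, evict the GGTT (under vm.mutex) and retry */
        if (vma == ERR_PTR(-ENOSPC)) {
            if (!i915_gem_evict_vm(&to_gt(i915)->ggtt->vm, &ww, NULL))
                vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view,
                                                  0, 0, flags);
        }
    }
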
459 struct drm_i915_gem_object *obj = mmo->obj;
464 if (i915_gem_object_is_readonly(obj) && write)
468 if (range_overflows_t(u64, addr, len, obj->base.size))
473 err = i915_gem_object_lock(obj, &ww);
478 vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
486 __i915_gem_object_flush_map(obj, addr, len);
491 i915_gem_object_unpin_map(obj);
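
Lines 459-491 implement vm_ops->access(), used by ptrace and /proc/pid/mem rather than by ordinary loads and stores. The fragments show the shape: refuse writes to read-only objects (464), bounds-check against the object size (468), then pin a forced-WC map of the whole object and memcpy through it. A sketch of the body; the surrounding ww lock/retry boilerplate (line 473) and the return-len convention are from memory:

    if (i915_gem_object_is_readonly(obj) && write)
        return -EACCES;

    addr -= area->vm_start;
    if (range_overflows_t(u64, addr, len, obj->base.size))
        return -EINVAL;

    /* this is for debugging, so favour simplicity: map the lot as WC */
    vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
    if (IS_ERR(vaddr))
        return PTR_ERR(vaddr);

    if (write) {
        memcpy(vaddr + addr, buf, len);
        __i915_gem_object_flush_map(obj, addr, len);
    } else {
        memcpy(buf, vaddr + addr, len);
    }

    i915_gem_object_unpin_map(obj);
    return len;
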
506 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
510 GEM_BUG_ON(!obj->userfault_count);
512 for_each_ggtt_vma(vma, obj)
515 GEM_BUG_ON(obj->userfault_count);
526 void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
528 struct drm_i915_private *i915 = to_i915(obj->base.dev);
543 if (!obj->userfault_count)
546 __i915_gem_object_release_mmap_gtt(obj);
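
Lines 506-546 revoke GTT mappings. The unlocked __ variant (506-515) walks every GGTT vma and kills its PTEs; the userfault_count assertions bracket the walk because revoking each vma's mapping decrements the count, so it must reach zero by the end. The locked wrapper (526-546) is expected to hold a runtime-pm wakeref and the GGTT vm.mutex around the early-exit check at 543 (those acquisitions are from memory, not visible in the listing). Sketch of the unlocked core:

    void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
    {
        struct i915_vma *vma;

        GEM_BUG_ON(!obj->userfault_count);

        for_each_ggtt_vma(vma, obj)
            i915_vma_revoke_mmap(vma);

        /* revoking the last mapped vma must have zeroed the count */
        GEM_BUG_ON(obj->userfault_count);
    }
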
563 void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
565 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
574 GEM_BUG_ON(!obj->userfault_count);
575 list_del(&obj->userfault_link);
576 obj->userfault_count = 0;
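
Lines 563-576 are the TTM flavour used on runtime suspend: unmap through the TTM buffer object's vma node, then clear the userfault bookkeeping directly, relying on runtime suspend itself for exclusion. A sketch; the drm_vma_node_unmap() call and its bdev->dev_mapping argument are assumptions from memory of the TTM conversion:

    void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
    {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct ttm_device *bdev = bo->bdev;

        drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);

        /*
         * We have exclusive access here via runtime suspend; all
         * other callers must take a runtime-pm wakeref first.
         */
        GEM_BUG_ON(!obj->userfault_count);
        list_del(&obj->userfault_link);
        obj->userfault_count = 0;
    }
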
579 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
583 if (obj->ops->unmap_virtual)
584 obj->ops->unmap_virtual(obj);
586 spin_lock(&obj->mmo.lock);
588 &obj->mmo.offsets, offset) {
596 spin_unlock(&obj->mmo.lock);
598 obj->base.dev->anon_inode->i_mapping);
599 spin_lock(&obj->mmo.lock);
601 spin_unlock(&obj->mmo.lock);
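
Lines 579-601 revoke the CPU mmaps: every i915_mmap_offset in the object's rbtree gets drm_vma_node_unmap() against the anon inode's address space (598), and the spinlock is dropped and retaken around each unmap (586/596/599/601) because unmap_mapping_range() can sleep. A sketch consistent with the iterator fragment at 588; the skip of GTT-type nodes is from memory:

    void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
    {
        struct i915_mmap_offset *mmo, *mn;

        if (obj->ops->unmap_virtual)
            obj->ops->unmap_virtual(obj);

        spin_lock(&obj->mmo.lock);
        rbtree_postorder_for_each_entry_safe(mmo, mn,
                                             &obj->mmo.offsets, offset) {
            /* GTT mmaps are revoked via release_mmap_gtt() instead */
            if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
                continue;

            spin_unlock(&obj->mmo.lock);
            drm_vma_node_unmap(&mmo->vma_node,
                               obj->base.dev->anon_inode->i_mapping);
            spin_lock(&obj->mmo.lock);
        }
        spin_unlock(&obj->mmo.lock);
    }
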
605 lookup_mmo(struct drm_i915_gem_object *obj,
610 spin_lock(&obj->mmo.lock);
611 rb = obj->mmo.offsets.rb_node;
617 spin_unlock(&obj->mmo.lock);
626 spin_unlock(&obj->mmo.lock);
632 insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
636 spin_lock(&obj->mmo.lock);
638 p = &obj->mmo.offsets.rb_node;
646 spin_unlock(&obj->mmo.lock);
647 drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
659 rb_insert_color(&mmo->offset, &obj->mmo.offsets);
660 spin_unlock(&obj->mmo.lock);
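
Lines 605-660 manage the per-object rbtree of mmap offsets, keyed by mmap type so an object can expose one offset per caching mode. lookup_mmo() is a plain keyed descent under obj->mmo.lock; insert_mmo() repeats the descent and, if someone raced in a node of the same type, unlocks and discards the freshly allocated node's vma offset (646-647), otherwise links and recolours the new node (659). A sketch of the lookup side, reconstructed from the fragments:

    static struct i915_mmap_offset *
    lookup_mmo(struct drm_i915_gem_object *obj,
               enum i915_mmap_type mmap_type)
    {
        struct rb_node *rb;

        spin_lock(&obj->mmo.lock);
        rb = obj->mmo.offsets.rb_node;
        while (rb) {
            struct i915_mmap_offset *mmo =
                rb_entry(rb, typeof(*mmo), offset);

            if (mmo->mmap_type == mmap_type) {
                spin_unlock(&obj->mmo.lock);
                return mmo;
            }

            if (mmo->mmap_type < mmap_type)
                rb = rb->rb_right;
            else
                rb = rb->rb_left;
        }
        spin_unlock(&obj->mmo.lock);

        return NULL;
    }
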
666 mmap_offset_attach(struct drm_i915_gem_object *obj,
670 struct drm_i915_private *i915 = to_i915(obj->base.dev);
674 GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
676 mmo = lookup_mmo(obj, mmap_type);
684 mmo->obj = obj;
688 err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
689 &mmo->vma_node, obj->base.size / PAGE_SIZE);
700 err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
701 &mmo->vma_node, obj->base.size / PAGE_SIZE);
706 mmo = insert_mmo(obj, mmo);
707 GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
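
Lines 666-707 allocate and publish an offset node. The GEM_BUG_ON at 674 documents that this path is only for objects without their own mmap_offset/mmap_ops hooks (those go through TTM), and the two identical drm_vma_offset_add() calls at 688-689 and 700-701 are a retry: if the vma manager has no space, the driver reaps freed objects and tries once more. A condensed sketch; the allocation details, the reaping step and the per-file drm_vma_node_allow() grant are from memory:

    mmo = lookup_mmo(obj, mmap_type);
    if (mmo)
        goto out;

    mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
    if (!mmo)
        return ERR_PTR(-ENOMEM);

    mmo->obj = obj;
    mmo->mmap_type = mmap_type;
    drm_vma_node_reset(&mmo->vma_node);

    err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
                             &mmo->vma_node, obj->base.size / PAGE_SIZE);
    if (likely(!err))
        goto insert;

    /* attempt to reap mmap space from dead objects, then retry once */
    i915_gem_drain_freed_objects(i915);
    err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
                             &mmo->vma_node, obj->base.size / PAGE_SIZE);
    if (err)
        goto err;

insert:
    mmo = insert_mmo(obj, mmo);
    GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
    if (file)
        drm_vma_node_allow(&mmo->vma_node, file);
    return mmo;
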
719 __assign_mmap_offset(struct drm_i915_gem_object *obj,
725 if (i915_gem_object_never_mmap(obj))
728 if (obj->ops->mmap_offset) {
732 *offset = obj->ops->mmap_offset(obj);
740 !i915_gem_object_has_struct_page(obj) &&
741 !i915_gem_object_has_iomem(obj))
744 mmo = mmap_offset_attach(obj, mmap_type, file);
758 struct drm_i915_gem_object *obj;
761 obj = i915_gem_object_lookup(file, handle);
762 if (!obj)
765 err = i915_gem_object_lock_interruptible(obj, NULL);
768 err = __assign_mmap_offset(obj, mmap_type, offset, file);
769 i915_gem_object_unlock(obj);
771 i915_gem_object_put(obj);
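
Lines 719-771 connect the mmap-offset ioctl to the machinery above. __assign_mmap_offset() (719-744) filters: objects that may never be mmapped fail outright (725); objects with their own mmap_offset hook, i.e. TTM-backed ones, report that offset directly (728-732); everything else must have struct pages or iomem behind it (740-741) before an offset is attached (744). The wrapper at 758-771 is the usual lookup/lock/assign/unlock/put dance, sketched here (the error label is an assumption):

    struct drm_i915_gem_object *obj;
    int err;

    obj = i915_gem_object_lookup(file, handle);
    if (!obj)
        return -ENOENT;

    err = i915_gem_object_lock_interruptible(obj, NULL);
    if (err)
        goto out_put;
    err = __assign_mmap_offset(obj, mmap_type, offset, file);
    i915_gem_object_unlock(obj);
out_put:
    i915_gem_object_put(obj);
    return err;
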
871 struct drm_i915_gem_object *obj = mmo->obj;
873 GEM_BUG_ON(!obj);
874 i915_gem_object_get(obj);
880 struct drm_i915_gem_object *obj = mmo->obj;
882 GEM_BUG_ON(!obj);
883 i915_gem_object_put(obj);
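
Lines 871-883 are the vm_ops open/close pair: each vma's lifetime pins the object through a plain get/put on mmo->obj, which is why i915_gem_fb_mmap() below has to balance the reference by hand when vm_open() never ran. In full; the vm_private_data dereference is from memory:

    static void vm_open(struct vm_area_struct *vma)
    {
        struct i915_mmap_offset *mmo = vma->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;

        GEM_BUG_ON(!obj);
        i915_gem_object_get(obj);
    }

    static void vm_close(struct vm_area_struct *vma)
    {
        struct i915_mmap_offset *mmo = vma->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;

        GEM_BUG_ON(!obj);
        i915_gem_object_put(obj);
    }
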
937 i915_gem_object_mmap(struct drm_i915_gem_object *obj,
941 struct drm_i915_private *i915 = to_i915(obj->base.dev);
945 if (i915_gem_object_is_readonly(obj)) {
947 i915_gem_object_put(obj);
955 i915_gem_object_put(obj);
962 * We keep the ref on mmo->obj, not vm_file, but we require
973 if (obj->ops->mmap_ops) {
975 vma->vm_ops = obj->ops->mmap_ops;
976 vma->vm_private_data = obj->base.vma_node.driver_private;
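
Lines 937-976 finish setting up a vma. Read-only objects refuse VM_WRITE mappings outright (945-947) and also drop VM_MAYWRITE so mprotect() cannot re-enable writes; note both failure paths (947, 955) consume the reference the callers handed in. The comment at 962 covers the trick of pointing vma->vm_file at a driver-wide anonymous inode, so a single unmap_mapping_range() on that address_space revokes every userspace mapping at once. TTM-backed objects (973-976) install their own mmap_ops and vma-node private data instead. A condensed body sketch; mmap_singleton() and the vm_flags helpers are names from memory of this file:

    if (i915_gem_object_is_readonly(obj)) {
        if (vma->vm_flags & VM_WRITE) {
            i915_gem_object_put(obj);
            return -EINVAL;
        }
        vm_flags_clear(vma, VM_MAYWRITE);
    }

    anon = mmap_singleton(i915);        /* shared anonymous inode */
    if (IS_ERR(anon)) {
        i915_gem_object_put(obj);
        return PTR_ERR(anon);
    }

    /*
     * We keep the ref on mmo->obj, not vm_file, but vma->vm_file
     * must be the anon inode so device teardown can find and zap
     * all user mappings through one address_space.
     */
    vma_set_file(vma, anon);
    fput(anon);                         /* vma_set_file() took its own ref */

    if (obj->ops->mmap_ops) {
        vma->vm_ops = obj->ops->mmap_ops;
        vma->vm_private_data = obj->base.vma_node.driver_private;
        return 0;
    }

    vma->vm_private_data = mmo;
    /* mmo->mmap_type then selects vm_ops_cpu or vm_ops_gtt + page_prot */
    return 0;
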
1025 struct drm_i915_gem_object *obj = NULL;
1044 obj = i915_gem_object_get_rcu(mmo->obj);
1046 GEM_BUG_ON(obj && obj->ops->mmap_ops);
1048 obj = i915_gem_object_get_rcu
1052 GEM_BUG_ON(obj && !obj->ops->mmap_ops);
1057 if (!obj)
1060 return i915_gem_object_mmap(obj, mmo, vma);
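
Lines 1025-1060 are the file-level mmap entry point. The offset in vm_pgoff is resolved through the drm vma-offset manager under RCU, and the two GEM_BUG_ONs (1046, 1052) assert the invariant that node->driver_private distinguishes i915's own i915_mmap_offset nodes from TTM-owned ones. The i915_gem_object_get_rcu() calls (1044, 1048) can observe an object already at zero refcount mid-destruction, in which case obj stays NULL and the mmap fails (1057). A sketch of the lookup; the locking helper names are from memory:

    rcu_read_lock();
    drm_vma_offset_lock_lookup(dev->vma_offset_manager);
    node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                              vma->vm_pgoff,
                                              vma_pages(vma));
    if (node && drm_vma_node_is_allowed(node, priv)) {
        if (!node->driver_private) {
            mmo = container_of(node, struct i915_mmap_offset, vma_node);
            obj = i915_gem_object_get_rcu(mmo->obj);

            GEM_BUG_ON(obj && obj->ops->mmap_ops);
        } else {
            obj = i915_gem_object_get_rcu
                (container_of(node, struct drm_i915_gem_object,
                              base.vma_node));

            GEM_BUG_ON(obj && !obj->ops->mmap_ops);
        }
    }
    drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
    rcu_read_unlock();

    if (!obj)
        return node ? -EACCES : -EINVAL;

    return i915_gem_object_mmap(obj, mmo, vma);
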
1063 int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
1065 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1075 if (obj->ops->mmap_ops) {
1080 vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
1084 mmo = mmap_offset_attach(obj, mmap_type, NULL);
1092 * this obj and then it gets decreased by the vm_ops->close().
1093 * To balance this increase the obj ref_count here.
1095 obj = i915_gem_object_get(obj);
1096 return i915_gem_object_mmap(obj, mmo, vma);
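
Lines 1063-1096 serve mmaps of framebuffer objects (e.g. for fbdev). TTM-backed objects already own an offset, so vm_pgoff is simply rebased onto their vma node (1080); other objects get an offset attached on the fly (1084), picking GTT or WC depending on whether the GGTT has a mappable aperture. Because the vm_ops are installed too late for vm_open() to run, the comment at 1092-1093 explains the explicit get at 1095 that pre-balances the eventual vm_close() put. A condensed body sketch; the aperture test is an assumption from memory:

    if (obj->ops->mmap_ops) {
        /* TTM object: reuse its existing vma-node offset */
        vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
    } else {
        /* stolen/smem object: attach an offset now */
        mmap_type = i915_ggtt_has_aperture(to_gt(i915)->ggtt) ?
                    I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
        mmo = mmap_offset_attach(obj, mmap_type, NULL);
        if (IS_ERR(mmo))
            return PTR_ERR(mmo);
    }

    /*
     * vm_ops are installed too late for vm_ops->open() to run, so
     * take here the reference it would have taken; vm_ops->close()
     * drops it when the mapping goes away.
     */
    obj = i915_gem_object_get(obj);
    return i915_gem_object_mmap(obj, mmo, vma);
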