Lines matching defs:obj in drivers/gpu/drm/i915/i915_gem.c (each entry is the file line number followed by the matched line):

116 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
119 struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
126 assert_object_held(obj);
128 if (list_empty(&obj->vma.list))
141 spin_lock(&obj->vma.lock);
142 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
165 spin_unlock(&obj->vma.lock);
175 assert_object_held(vma->obj);
192 spin_lock(&obj->vma.lock);
194 list_splice_init(&still_in_list, &obj->vma.list);
195 spin_unlock(&obj->vma.lock);
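
These matches trace the locking shape of i915_gem_object_unbind(): vmas are popped off obj->vma.list under the spinlock onto a local still_in_list, the lock is dropped for the actual unbind, and whatever survives is spliced back at the end. A condensed sketch of that walk (the real function's reference counting, bind-mask checks and ww details are elided):

    struct i915_vma *vma;
    LIST_HEAD(still_in_list);
    int ret = 0;

    spin_lock(&obj->vma.lock);
    while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                   struct i915_vma,
                                                   obj_link))) {
            list_move_tail(&vma->obj_link, &still_in_list);
            spin_unlock(&obj->vma.lock);

            ret = i915_vma_unbind(vma);     /* may sleep, lock dropped */

            spin_lock(&obj->vma.lock);
    }
    list_splice_init(&still_in_list, &obj->vma.list);
    spin_unlock(&obj->vma.lock);
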
227 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
237 ret = i915_gem_object_lock_interruptible(obj, NULL);
241 ret = i915_gem_object_pin_pages(obj);
245 ret = i915_gem_object_prepare_read(obj, &needs_clflush);
249 i915_gem_object_finish_access(obj);
250 i915_gem_object_unlock(obj);
256 struct page *page = i915_gem_object_get_page(obj, idx);
269 i915_gem_object_unpin_pages(obj);
273 i915_gem_object_unpin_pages(obj);
275 i915_gem_object_unlock(obj);
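
Together these lines show the access bracket every shmem read follows: lock the object, pin its backing pages, prepare_read() (which reports whether clflush is needed), then drop the lock before the page-by-page copy. Condensed, with shmem_pread() being the per-page helper defined in the same file:

    unsigned int needs_clflush;
    unsigned int idx, offset;
    char __user *user_data;
    u64 remain;
    int ret;

    ret = i915_gem_object_lock_interruptible(obj, NULL);
    if (ret)
            return ret;

    ret = i915_gem_object_pin_pages(obj);
    if (ret)
            goto err_unlock;

    ret = i915_gem_object_prepare_read(obj, &needs_clflush);
    if (ret)
            goto err_unpin;

    i915_gem_object_finish_access(obj);
    i915_gem_object_unlock(obj);            /* copy proceeds unlocked */

    remain = args->size;
    user_data = u64_to_user_ptr(args->data_ptr);
    offset = offset_in_page(args->offset);
    for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
            struct page *page = i915_gem_object_get_page(obj, idx);
            unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

            ret = shmem_pread(page, offset, length, user_data,
                              needs_clflush);
            if (ret)
                    break;

            remain -= length;
            user_data += length;
            offset = 0;
    }

    i915_gem_object_unpin_pages(obj);
    return ret;

err_unpin:
    i915_gem_object_unpin_pages(obj);
err_unlock:
    i915_gem_object_unlock(obj);
    return ret;
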
303 static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
307 struct drm_i915_private *i915 = to_i915(obj->base.dev);
316 ret = i915_gem_object_lock(obj, &ww);
320 ret = i915_gem_object_set_to_gtt_domain(obj, write);
324 if (!i915_gem_object_is_tiled(obj))
325 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
343 ret = i915_gem_object_pin_pages(obj);
364 static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
368 struct drm_i915_private *i915 = to_i915(obj->base.dev);
371 i915_gem_object_unpin_pages(obj);
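
i915_gem_gtt_prepare() and its cleanup counterpart bracket both GTT-mapped paths below. Prepare runs a ww transaction: lock the object, move it to the GTT domain, and attempt a mappable GGTT pin (untiled objects only), falling back to a scratch drm_mm node plus pinned pages when the pin fails; -EDEADLK triggers the standard backoff-and-retry. Condensed, with the node fallback summarized in a comment:

    struct i915_gem_ww_ctx ww;
    struct i915_vma *vma = ERR_PTR(-ENODEV);
    int ret;

    i915_gem_ww_ctx_init(&ww, true);
retry:
    ret = i915_gem_object_lock(obj, &ww);
    if (ret)
            goto err_ww;

    ret = i915_gem_object_set_to_gtt_domain(obj, write);
    if (ret)
            goto err_ww;

    if (!i915_gem_object_is_tiled(obj))
            vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                              PIN_MAPPABLE |
                                              PIN_NONBLOCK |
                                              PIN_NOEVICT);
    /* On pin failure the real function instead grabs a PAGE_SIZE
     * drm_mm node in the mappable aperture and pins the object's
     * pages (the line-343 match) so they can be mapped one at a time. */

err_ww:
    if (ret == -EDEADLK) {
            ret = i915_gem_ww_ctx_backoff(&ww);
            if (!ret)
                    goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);
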
381 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
384 struct drm_i915_private *i915 = to_i915(obj->base.dev);
399 vma = i915_gem_gtt_prepare(obj, &node, false);
422 i915_gem_object_get_dma_address(obj,
442 i915_gem_gtt_cleanup(obj, &node, vma);
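
Inside the read loop the fallback node acts as a single reusable aperture page: when no vma was pinned, each iteration rebinds the node at the DMA address of the next object page (the line-422 match) before copying through the ioremapped aperture. Roughly, with gtt_user_read() being the helper in the same file; note the vm.insert_page() cache/PAT argument differs across kernel versions:

    while (remain > 0) {
            u32 page_base = node.start;
            unsigned int page_offset = offset_in_page(offset);
            unsigned int page_length =
                    min_t(u64, remain, PAGE_SIZE - page_offset);

            if (drm_mm_node_allocated(&node)) {
                    /* Repoint the scratch aperture page at the next
                     * page of the object. */
                    ggtt->vm.insert_page(&ggtt->vm,
                                         i915_gem_object_get_dma_address(obj,
                                                 offset >> PAGE_SHIFT),
                                         node.start, I915_CACHE_NONE, 0);
            } else {
                    page_base += offset & PAGE_MASK;
            }

            if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                              user_data, page_length)) {
                    ret = -EFAULT;
                    break;
            }

            remain -= page_length;
            user_data += page_length;
            offset += page_length;
    }
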
462 struct drm_i915_gem_object *obj;
478 obj = i915_gem_object_lookup(file, args->handle);
479 if (!obj)
483 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
488 trace_i915_gem_object_pread(obj, args->offset, args->size);
490 if (obj->ops->pread)
491 ret = obj->ops->pread(obj, args);
495 ret = i915_gem_object_wait(obj,
501 ret = i915_gem_shmem_pread(obj, args);
503 ret = i915_gem_gtt_pread(obj, args);
506 i915_gem_object_put(obj);
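
From userspace the whole path is reached through DRM_IOCTL_I915_GEM_PREAD. A minimal caller sketch (fd is an open i915 DRM fd and handle an existing GEM handle, both assumed; newer platforms may reject this ioctl entirely). The line-483 match is the overflow check that rejects an offset/size pair reaching past the object with -EINVAL:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Read 'size' bytes at 'offset' from a GEM object into 'dst'.
     * Returns 0, or -1 with errno set by the ioctl. */
    static int gem_pread(int fd, uint32_t handle, uint64_t offset,
                         void *dst, uint64_t size)
    {
            struct drm_i915_gem_pread pread;

            memset(&pread, 0, sizeof(pread));
            pread.handle = handle;
            pread.offset = offset;
            pread.size = size;
            pread.data_ptr = (uintptr_t)dst;

            return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
    }
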
540 * @obj: i915 GEM object
544 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
547 struct drm_i915_private *i915 = to_i915(obj->base.dev);
561 if (i915_gem_object_has_struct_page(obj)) {
577 vma = i915_gem_gtt_prepare(obj, &node, true);
583 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
603 i915_gem_object_get_dma_address(obj,
630 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
632 i915_gem_gtt_cleanup(obj, &node, vma);
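
Lines 583 and 630 are the frontbuffer-tracking bracket around the CPU write: invalidate before the first byte lands (so PSR/FBC stop relying on stale contents) and flush once the copy is done. The loop between them mirrors the pread loop above, writing through the file's ggtt_write() helper instead of gtt_user_read():

    i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

    /* ... per-page copy through the aperture, as in the pread loop,
     * but writing with ggtt_write(&ggtt->iomap, ...) ... */

    i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
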
666 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
677 ret = i915_gem_object_lock_interruptible(obj, NULL);
681 ret = i915_gem_object_pin_pages(obj);
685 ret = i915_gem_object_prepare_write(obj, &needs_clflush);
689 i915_gem_object_finish_access(obj);
690 i915_gem_object_unlock(obj);
704 struct page *page = i915_gem_object_get_page(obj, idx);
718 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
720 i915_gem_object_unpin_pages(obj);
724 i915_gem_object_unpin_pages(obj);
726 i915_gem_object_unlock(obj);
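
The needs_clflush mask from i915_gem_object_prepare_write() drives the per-page helper: flush stale cachelines before a partial overwrite, and flush the written range back out afterwards if the GPU will read it uncached. A sketch of that helper's core (shmem_pwrite() in the same file; kmap vs kmap_local_page and the partial-cacheline refinements vary by kernel version):

    static int shmem_pwrite(struct page *page, int offset, int len,
                            char __user *user_data,
                            bool needs_clflush_before,
                            bool needs_clflush_after)
    {
            char *vaddr = kmap(page);
            int ret;

            if (needs_clflush_before)
                    drm_clflush_virt_range(vaddr + offset, len);

            ret = __copy_from_user(vaddr + offset, user_data, len);
            if (!ret && needs_clflush_after)
                    drm_clflush_virt_range(vaddr + offset, len);

            kunmap(page);

            return ret ? -EFAULT : 0;
    }
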
744 struct drm_i915_gem_object *obj;
759 obj = i915_gem_object_lookup(file, args->handle);
760 if (!obj)
764 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
770 if (i915_gem_object_is_readonly(obj)) {
775 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
778 if (obj->ops->pwrite)
779 ret = obj->ops->pwrite(obj, args);
783 ret = i915_gem_object_wait(obj,
797 if (!i915_gem_object_has_struct_page(obj) ||
798 i915_gem_cpu_write_needs_clflush(obj))
803 ret = i915_gem_gtt_pwrite_fast(obj, args);
806 if (i915_gem_object_has_struct_page(obj))
807 ret = i915_gem_shmem_pwrite(obj, args);
811 i915_gem_object_put(obj);
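
The write ioctl mirrors gem_pread() above, with one extra gate visible at the line-770 match: objects marked read-only fail with -EINVAL before any copy path is tried. A matching caller sketch (same assumed fd/handle):

    static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
                          const void *src, uint64_t size)
    {
            struct drm_i915_gem_pwrite pwrite;

            memset(&pwrite, 0, sizeof(pwrite));
            pwrite.handle = handle;
            pwrite.offset = offset;
            pwrite.size = size;
            pwrite.data_ptr = (uintptr_t)src;

            return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
    }
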
826 struct drm_i915_gem_object *obj;
828 obj = i915_gem_object_lookup(file, args->handle);
829 if (!obj)
838 i915_gem_object_flush_if_display(obj);
839 i915_gem_object_put(obj);
846 struct drm_i915_gem_object *obj, *on;
856 list_for_each_entry_safe(obj, on,
858 __i915_gem_object_release_mmap_gtt(obj);
860 list_for_each_entry_safe(obj, on,
862 i915_gem_object_runtime_pm_release_mmap_offset(obj);
894 struct drm_i915_gem_object *obj = vma->obj;
896 spin_lock(&obj->vma.lock);
898 rb_erase(&vma->obj_node, &obj->vma.tree);
901 spin_unlock(&obj->vma.lock);
905 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
910 struct drm_i915_private *i915 = to_i915(obj->base.dev);
927 if (obj->base.size > ggtt->mappable_end)
947 obj->base.size > ggtt->mappable_end / 2)
952 vma = i915_vma_instance(obj, &ggtt->vm, view);
992 if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1008 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1017 err = i915_gem_object_lock(obj, &ww);
1021 ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
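
i915_gem_object_ggtt_pin() is a thin wrapper that owns the ww context and retries the transaction on -EDEADLK before delegating to the _ww variant above; its body is small enough to sketch nearly whole (for_i915_gem_ww() handles init, backoff and fini):

    struct i915_gem_ww_ctx ww;
    struct i915_vma *ret;
    int err;

    for_i915_gem_ww(&ww, err, true) {
            err = i915_gem_object_lock(obj, &ww);
            if (err)
                    continue;       /* backs off and retries on -EDEADLK */

            ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
                                              alignment, flags);
            if (IS_ERR(ret))
                    err = PTR_ERR(ret);
    }

    return err ? ERR_PTR(err) : ret;
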
1036 struct drm_i915_gem_object *obj;
1047 obj = i915_gem_object_lookup(file_priv, args->handle);
1048 if (!obj)
1051 err = i915_gem_object_lock_interruptible(obj, NULL);
1055 if (i915_gem_object_has_pages(obj) &&
1056 i915_gem_object_is_tiled(obj) &&
1058 if (obj->mm.madv == I915_MADV_WILLNEED) {
1059 GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
1060 i915_gem_object_clear_tiling_quirk(obj);
1061 i915_gem_object_make_shrinkable(obj);
1064 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
1065 i915_gem_object_make_unshrinkable(obj);
1066 i915_gem_object_set_tiling_quirk(obj);
1070 if (obj->mm.madv != __I915_MADV_PURGED) {
1071 obj->mm.madv = args->madv;
1072 if (obj->ops->adjust_lru)
1073 obj->ops->adjust_lru(obj);
1076 if (i915_gem_object_has_pages(obj) ||
1077 i915_gem_object_has_self_managed_shrink_list(obj)) {
1081 if (!list_empty(&obj->mm.link)) {
1084 if (obj->mm.madv != I915_MADV_WILLNEED)
1088 list_move_tail(&obj->mm.link, list);
1095 if (obj->mm.madv == I915_MADV_DONTNEED &&
1096 !i915_gem_object_has_pages(obj))
1097 i915_gem_object_truncate(obj);
1099 args->retained = obj->mm.madv != __I915_MADV_PURGED;
1101 i915_gem_object_unlock(obj);
1103 i915_gem_object_put(obj);
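
The madvise matches close the section: userspace hints WILLNEED or DONTNEED per handle, the kernel moves the object between the shrinker lists (lines 1081-1088), truncates a DONTNEED object whose pages are already gone (line 1097), and reports via retained whether the backing store still exists. A minimal caller (same assumed fd/handle); retained == 0 means the contents were already purged:

    /* Returns the kernel's 'retained' flag, or -1 on ioctl failure. */
    static int gem_madvise(int fd, uint32_t handle, uint32_t madv)
    {
            struct drm_i915_gem_madvise arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.madv = madv;  /* I915_MADV_WILLNEED or I915_MADV_DONTNEED */

            if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
                    return -1;

            return arg.retained;
    }
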