Lines matching defs:obj in the i915 GEM domain-management code (i915_gem_domain.c)

23 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
25 struct drm_i915_private *i915 = to_i915(obj->base.dev);
37 return !(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
38 i915_gem_object_has_cache_level(obj, I915_CACHE_WT));
41 bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
43 struct drm_i915_private *i915 = to_i915(obj->base.dev);
45 if (obj->cache_dirty)
51 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
55 return i915_gem_object_is_framebuffer(obj);
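/* Note (added annotation, not part of the matched source): the predicate
 * above returns false when obj->cache_dirty is already set (a deferred
 * flush is pending), true when CPU writes are not coherent with the
 * hardware, and otherwise true only for framebuffers, since display
 * scanout does not snoop the CPU cache.
 */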
59 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
63 assert_object_held(obj);
65 if (!(obj->write_domain & flush_domains))
68 switch (obj->write_domain) {
70 spin_lock(&obj->vma.lock);
71 for_each_ggtt_vma(vma, obj)
73 spin_unlock(&obj->vma.lock);
75 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
83 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
87 if (gpu_write_needs_clflush(obj))
88 obj->cache_dirty = true;
92 obj->write_domain = 0;
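/* Note (added annotation): callers pass the complement of the domain they
 * are about to move into, e.g. flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT),
 * meaning "flush whatever write domain is currently pending unless it is
 * already the target". With no pending write domain, or one excluded by the
 * mask, the function returns without doing anything.
 */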
95 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
101 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
102 if (obj->cache_dirty)
103 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
104 obj->write_domain = 0;
107 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
109 if (!i915_gem_object_is_framebuffer(obj))
112 i915_gem_object_lock(obj, NULL);
113 __i915_gem_object_flush_for_display(obj);
114 i915_gem_object_unlock(obj);
117 void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj)
119 if (i915_gem_object_is_framebuffer(obj))
120 __i915_gem_object_flush_for_display(obj);
126 * @obj: object to act on
133 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
137 assert_object_held(obj);
139 ret = i915_gem_object_wait(obj,
146 if (obj->write_domain == I915_GEM_DOMAIN_WC)
149 /* Flush and acquire obj->pages so that we are coherent through
152 * For example, if the obj->filp was moved to swap without us
154 * continue to assume that the obj remained out of the CPU cached
157 ret = i915_gem_object_pin_pages(obj);
161 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
167 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
173 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
174 obj->read_domains |= I915_GEM_DOMAIN_WC;
176 obj->read_domains = I915_GEM_DOMAIN_WC;
177 obj->write_domain = I915_GEM_DOMAIN_WC;
178 obj->mm.dirty = true;
181 i915_gem_object_unpin_pages(obj);
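A minimal sketch of the usual calling pattern for the set-to-domain helpers, using only the i915 helpers visible in this listing; the wrapper name is hypothetical:

/* Hypothetical caller: make an object coherent for access through a
 * write-combining CPU mapping. Only the object (dma-resv) lock is needed.
 */
static int example_make_wc_coherent(struct drm_i915_gem_object *obj, bool write)
{
        int err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                return err;

        err = i915_gem_object_set_to_wc_domain(obj, write);
        i915_gem_object_unlock(obj);

        return err;
}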
188 * @obj: object to act on
195 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
199 assert_object_held(obj);
201 ret = i915_gem_object_wait(obj,
208 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
211 /* Flush and acquire obj->pages so that we are coherent through
214 * For example, if the obj->filp was moved to swap without us
216 * continue to assume that the obj remained out of the CPU cached
219 ret = i915_gem_object_pin_pages(obj);
223 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
229 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
235 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
236 obj->read_domains |= I915_GEM_DOMAIN_GTT;
240 obj->read_domains = I915_GEM_DOMAIN_GTT;
241 obj->write_domain = I915_GEM_DOMAIN_GTT;
242 obj->mm.dirty = true;
244 spin_lock(&obj->vma.lock);
245 for_each_ggtt_vma(vma, obj)
248 spin_unlock(&obj->vma.lock);
251 i915_gem_object_unpin_pages(obj);
257 * @obj: object to act on
270 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
281 if (i915_gem_object_has_cache_level(obj, cache_level))
284 ret = i915_gem_object_wait(obj,
292 i915_gem_object_set_cache_coherency(obj, cache_level);
293 obj->cache_dirty = true;
296 return i915_gem_object_unbind(obj,
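/* Note (added annotation): the cache/PAT attributes are written into the
 * PTEs when a VMA is bound, so changing the cache level ends by unbinding
 * every VMA; the new attributes take effect when the object is rebound on
 * next use, and setting obj->cache_dirty records that stale CPU cachelines
 * may still need flushing.
 */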
305 struct drm_i915_gem_object *obj;
312 obj = i915_gem_object_lookup_rcu(file, args->handle);
313 if (!obj) {
322 if (obj->pat_set_by_user) {
327 if (i915_gem_object_has_cache_level(obj, I915_CACHE_LLC) ||
328 i915_gem_object_has_cache_level(obj, I915_CACHE_L3_LLC))
330 else if (i915_gem_object_has_cache_level(obj, I915_CACHE_WT))
344 struct drm_i915_gem_object *obj;
377 obj = i915_gem_object_lookup(file, args->handle);
378 if (!obj)
385 if (obj->pat_set_by_user) {
394 if (i915_gem_object_is_proxy(obj)) {
399 if (!i915_gem_object_is_userptr(obj) ||
406 ret = i915_gem_object_lock_interruptible(obj, NULL);
410 ret = i915_gem_object_set_cache_level(obj, level);
411 i915_gem_object_unlock(obj);
414 i915_gem_object_put(obj);
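For completeness, a hedged userspace sketch of driving the set-caching ioctl handled above; it assumes a DRM fd and a GEM handle obtained elsewhere, and the listing shows both caching ioctls reject objects whose PAT was set by userspace (pat_set_by_user).

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: request uncached (I915_CACHING_NONE) placement for
 * a GEM object. Returns 0 on success, -1 with errno set on failure.
 */
static int set_bo_uncached(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_caching arg = {
                .handle = handle,
                .caching = I915_CACHING_NONE,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}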
425 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
431 struct drm_i915_private *i915 = to_i915(obj->base.dev);
436 if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
449 ret = i915_gem_object_set_cache_level(obj,
459 if (i915_gem_object_is_tiled(obj))
461 i915_gem_object_get_tile_row_size(obj));
477 vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
481 vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0,
489 i915_gem_object_flush_if_display_locked(obj);
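/* Note (added annotation): display scanout does not snoop the CPU cache on
 * most platforms, so pinning for a display plane first switches the object
 * to an uncached or write-through cache level, pins it into the GGTT with
 * the requested view and alignment, and finally flushes any dirty
 * cachelines so the display engine sees the latest data. On parts with
 * local memory (HAS_LMEM) the framebuffer must already live in lmem.
 */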
497 * @obj: object to act on
504 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
508 assert_object_held(obj);
510 ret = i915_gem_object_wait(obj,
517 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
520 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
521 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
522 obj->read_domains |= I915_GEM_DOMAIN_CPU;
528 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
534 __start_cpu_write(obj);
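A hedged kernel-side sketch of reading an object through a cached CPU mapping after moving it to the CPU domain; i915_gem_object_pin_map_unlocked()/I915_MAP_WB are existing i915 helpers that do not appear in this listing, and the wrapper name is hypothetical:

static int example_cpu_read(struct drm_i915_gem_object *obj)
{
        void *vaddr;
        int err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                return err;

        err = i915_gem_object_set_to_cpu_domain(obj, false); /* read only */
        i915_gem_object_unlock(obj);
        if (err)
                return err;

        /* Map the backing pages write-back cached and read them. */
        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        /* ... consume the contents at vaddr ... */

        i915_gem_object_unpin_map(obj);
        return 0;
}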
552 struct drm_i915_gem_object *obj;
574 obj = i915_gem_object_lookup(file, args->handle);
575 if (!obj)
583 err = i915_gem_object_wait(obj,
591 if (i915_gem_object_is_userptr(obj)) {
596 err = i915_gem_object_userptr_validate(obj);
598 err = i915_gem_object_wait(obj,
612 if (i915_gem_object_is_proxy(obj)) {
617 err = i915_gem_object_lock_interruptible(obj, NULL);
622 * Flush and acquire obj->pages so that we are coherent through
625 * For example, if the obj->filp was moved to swap without us
627 * continue to assume that the obj remained out of the CPU cached
630 err = i915_gem_object_pin_pages(obj);
638 * no-ops. If obj->write_domain is set, we must be in the same
639 * obj->read_domains, and only that domain. Therefore, if that
640 * obj->write_domain matches the request read_domains, we are
644 if (READ_ONCE(obj->write_domain) == read_domains)
648 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
650 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
652 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
655 i915_gem_object_unpin_pages(obj);
658 i915_gem_object_unlock(obj);
661 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
664 i915_gem_object_put(obj);
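And the matching userspace side: a hedged sketch of the set-domain ioctl serviced above, typically issued before CPU access to an mmap()ed object (DRM fd and handle assumed to come from elsewhere):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: make a GEM object coherent for CPU reads and writes. */
static int prepare_cpu_access(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_set_domain arg = {
                .handle = handle,
                .read_domains = I915_GEM_DOMAIN_CPU,
                .write_domain = I915_GEM_DOMAIN_CPU,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
}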
673 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
679 if (!i915_gem_object_has_struct_page(obj))
682 assert_object_held(obj);
684 ret = i915_gem_object_wait(obj,
690 ret = i915_gem_object_pin_pages(obj);
694 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
696 ret = i915_gem_object_set_to_cpu_domain(obj, false);
703 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
710 if (!obj->cache_dirty &&
711 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
719 i915_gem_object_unpin_pages(obj);
723 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
729 if (!i915_gem_object_has_struct_page(obj))
732 assert_object_held(obj);
734 ret = i915_gem_object_wait(obj,
741 ret = i915_gem_object_pin_pages(obj);
745 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
747 ret = i915_gem_object_set_to_cpu_domain(obj, true);
754 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
761 if (!obj->cache_dirty) {
768 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
773 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
774 obj->mm.dirty = true;
779 i915_gem_object_unpin_pages(obj);
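Finally, a hedged sketch of how an in-kernel writer (a pwrite-style path) would pair the prepare/finish helpers above; the needs_clflush out-parameter, the CLFLUSH_BEFORE/CLFLUSH_AFTER flags, i915_gem_object_finish_access() and the other helpers used here exist in i915 but are not shown in this listing, and the wrapper name is hypothetical:

static int example_write_page(struct drm_i915_gem_object *obj,
                              pgoff_t pgno, const void *src, size_t len)
{
        unsigned int needs_clflush;
        struct page *page;
        void *vaddr;
        int err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                return err;

        err = i915_gem_object_prepare_write(obj, &needs_clflush);
        if (err)
                goto out_unlock;

        page = i915_gem_object_get_page(obj, pgno);
        vaddr = kmap_local_page(page);
        if (needs_clflush & CLFLUSH_BEFORE)
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
        memcpy(vaddr, src, len);          /* len assumed <= PAGE_SIZE */
        if (needs_clflush & CLFLUSH_AFTER)
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
        kunmap_local(vaddr);

        i915_gem_object_finish_access(obj);
out_unlock:
        i915_gem_object_unlock(obj);
        return err;
}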