Lines Matching defs:obj

58 bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
66 if (obj->pat_set_by_user)
73 return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl);
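
The two fragments above belong to i915_gem_object_has_cache_level(): when the PAT index was set directly by userspace (obj->pat_set_by_user) the kernel stops second-guessing coherency, otherwise the object's pat_index is compared against the index derived from the requested cache level. A minimal caller sketch, assuming the (obj, enum i915_cache_level) signature shown above; the wrapper function and its policy are illustrative, not upstream code:

    /* i915-internal headers assumed, e.g. gem/i915_gem_object.h */
    static bool needs_clflush_sketch(const struct drm_i915_gem_object *obj)
    {
            /* Uncached objects are not snooped, so CPU writes must be
             * flushed explicitly before the GPU reads them. */
            return i915_gem_object_has_cache_level(obj, I915_CACHE_NONE);
    }
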
78 struct drm_i915_gem_object *obj;
80 obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
81 if (!obj)
83 obj->base.funcs = &i915_gem_object_funcs;
85 return obj;
88 void i915_gem_object_free(struct drm_i915_gem_object *obj)
90 return kmem_cache_free(slab_objects, obj);
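
i915_gem_object_alloc() and i915_gem_object_free() are a symmetric pair over the slab_objects kmem_cache: the allocation is zeroed and only base.funcs is pre-filled, so a backend must still run i915_gem_object_init() before the object is usable. A short sketch of the pairing on an error path (the wrapper function and the failure condition are illustrative):

    static struct drm_i915_gem_object *alloc_sketch(size_t size)
    {
            struct drm_i915_gem_object *obj;

            obj = i915_gem_object_alloc();          /* zeroed from slab_objects */
            if (!obj)
                    return ERR_PTR(-ENOMEM);

            if (!IS_ALIGNED(size, PAGE_SIZE)) {     /* example failure */
                    i915_gem_object_free(obj);      /* back to the kmem_cache */
                    return ERR_PTR(-EINVAL);
            }

            return obj;
    }
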
93 void i915_gem_object_init(struct drm_i915_gem_object *obj,
101 BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
102 offsetof(typeof(*obj), __do_not_access.base));
104 spin_lock_init(&obj->vma.lock);
105 INIT_LIST_HEAD(&obj->vma.list);
107 INIT_LIST_HEAD(&obj->mm.link);
110 INIT_LIST_HEAD(&obj->client_link);
113 INIT_LIST_HEAD(&obj->lut_list);
114 spin_lock_init(&obj->lut_lock);
116 spin_lock_init(&obj->mmo.lock);
117 obj->mmo.offsets = RB_ROOT;
119 init_rcu_head(&obj->rcu);
121 obj->ops = ops;
123 obj->flags = flags;
125 obj->mm.madv = I915_MADV_WILLNEED;
126 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
127 mutex_init(&obj->mm.get_page.lock);
128 INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
129 mutex_init(&obj->mm.get_dma_page.lock);
134 * @obj: The gem object to clean up
141 void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
143 mutex_destroy(&obj->mm.get_page.lock);
144 mutex_destroy(&obj->mm.get_dma_page.lock);
145 dma_resv_fini(&obj->base._resv);
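
i915_gem_object_init() wires up the locks, lists, radix trees and flags listed above, and __i915_gem_object_fini() is its inverse, destroying the get_page/get_dma_page mutexes and the reservation object once the object is torn down. A sketch of a backend constructor built on these helpers, assuming the usual (obj, ops, lock_class_key, flags) ordering; the function, its ops table and the cache-level choice are hypothetical:

    /* i915-internal headers assumed, e.g. gem/i915_gem_object.h */
    static const struct drm_i915_gem_object_ops dummy_ops;  /* placeholder ops */

    static struct drm_i915_gem_object *
    dummy_object_create(struct drm_i915_private *i915, size_t size)
    {
            static struct lock_class_key lock_class;
            struct drm_i915_gem_object *obj;

            obj = i915_gem_object_alloc();
            if (!obj)
                    return ERR_PTR(-ENOMEM);

            drm_gem_private_object_init(&i915->drm, &obj->base, size);
            i915_gem_object_init(obj, &dummy_ops, &lock_class, 0);
            i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

            return obj;
    }
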
151 * @obj: #drm_i915_gem_object
154 void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
157 struct drm_i915_private *i915 = to_i915(obj->base.dev);
159 obj->pat_index = i915_gem_get_pat_index(i915, cache_level);
162 obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
165 obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
167 obj->cache_coherent = 0;
169 obj->cache_dirty =
170 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
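
i915_gem_object_set_cache_coherency() converts a cache level into a PAT index and then derives the cache_coherent read/write bits from it; cache_dirty is raised when the object is not coherent for writes (combined with a platform condition not visible in this listing), meaning a CPU cache flush is still owed before the GPU uses the pages. A hedged usage sketch; the HAS_LLC() policy and the sg_table variable are illustrative rather than the exact upstream sequence:

    /* At creation time, pick a cache level much like a shmem-style backend. */
    i915_gem_object_set_cache_coherency(obj,
                                        HAS_LLC(i915) ? I915_CACHE_LLC :
                                                        I915_CACHE_NONE);

    /* Later, cache_dirty tells the backend whether a flush is still pending
     * before handing the pages to the GPU. */
    if (obj->cache_dirty)
            drm_clflush_sg(pages);          /* pages: the object's sg_table */
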
176 * @obj: #drm_i915_gem_object
182 void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
185 struct drm_i915_private *i915 = to_i915(obj->base.dev);
187 if (obj->pat_index == pat_index)
190 obj->pat_index = pat_index;
193 obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
196 obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
198 obj->cache_coherent = 0;
200 obj->cache_dirty =
201 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
205 bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
207 struct drm_i915_private *i915 = to_i915(obj->base.dev);
213 if (!(obj->flags & I915_BO_ALLOC_USER))
219 if (obj->pat_set_by_user)
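
i915_gem_object_can_bypass_llc() covers userspace objects whose PAT index was chosen by the user: such objects may end up bypassing the LLC even though their pages came through shmem or dma-buf, so the driver has to assume the CPU caches hold dirty data. A one-line sketch of how an import path might consume the result (mirroring the upstream dma-buf/userptr usage as I understand it):

    /* Be pessimistic: force a cache flush before first GPU use. */
    if (i915_gem_object_can_bypass_llc(obj))
            obj->cache_dirty = true;
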
239 struct drm_i915_gem_object *obj = to_intel_bo(gem);
246 spin_lock(&obj->lut_lock);
247 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
256 if (&ln->obj_link != &obj->lut_list) {
258 if (cond_resched_lock(&obj->lut_lock))
263 spin_unlock(&obj->lut_lock);
265 spin_lock(&obj->mmo.lock);
266 rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
268 spin_unlock(&obj->mmo.lock);
282 GEM_BUG_ON(vma->obj != obj);
290 i915_gem_object_put(obj);
296 struct drm_i915_gem_object *obj =
297 container_of(head, typeof(*obj), rcu);
298 struct drm_i915_private *i915 = to_i915(obj->base.dev);
301 if (obj->mm.n_placements > 1)
302 kfree(obj->mm.placements);
304 i915_gem_object_free(obj);
310 static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
314 if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
315 i915_gem_object_release_mmap_gtt(obj);
317 if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
320 i915_gem_object_release_mmap_offset(obj);
323 &obj->mmo.offsets,
325 drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
329 obj->mmo.offsets = RB_ROOT;
335 * @obj: The gem object to clean up
342 void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
344 assert_object_held_shared(obj);
346 if (!list_empty(&obj->vma.list)) {
349 spin_lock(&obj->vma.lock);
350 while ((vma = list_first_entry_or_null(&obj->vma.list,
353 GEM_BUG_ON(vma->obj != obj);
354 spin_unlock(&obj->vma.lock);
358 spin_lock(&obj->vma.lock);
360 spin_unlock(&obj->vma.lock);
363 __i915_gem_object_free_mmaps(obj);
365 atomic_set(&obj->mm.pages_pin_count, 0);
373 if (obj->base.import_attach)
374 i915_gem_object_lock(obj, NULL);
376 __i915_gem_object_put_pages(obj);
378 if (obj->base.import_attach)
379 i915_gem_object_unlock(obj);
381 GEM_BUG_ON(i915_gem_object_has_pages(obj));
384 void __i915_gem_free_object(struct drm_i915_gem_object *obj)
386 trace_i915_gem_object_destroy(obj);
388 GEM_BUG_ON(!list_empty(&obj->lut_list));
390 bitmap_free(obj->bit_17);
392 if (obj->base.import_attach)
393 drm_prime_gem_destroy(&obj->base, NULL);
395 drm_gem_free_mmap_offset(&obj->base);
397 if (obj->ops->release)
398 obj->ops->release(obj);
400 if (obj->shares_resv_from)
401 i915_vm_resv_put(obj->shares_resv_from);
403 __i915_gem_object_fini(obj);
409 struct drm_i915_gem_object *obj, *on;
411 llist_for_each_entry_safe(obj, on, freed, freed) {
413 if (obj->ops->delayed_free) {
414 obj->ops->delayed_free(obj);
418 __i915_gem_object_pages_fini(obj);
419 __i915_gem_free_object(obj);
422 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
445 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
446 struct drm_i915_private *i915 = to_i915(obj->base.dev);
448 GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
450 i915_drm_client_remove_object(obj);
471 if (llist_add(&obj->freed, &i915->mm.free_list))
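
The free path shown above is deferred: i915_gem_free_object() pushes the object onto i915->mm.free_list with llist_add(), queueing the worker only when the list was previously empty, the worker drains the list, and objects without a delayed_free hook are released via call_rcu() so concurrent RCU readers can still dereference them. A generic sketch of that llist-plus-RCU pattern, detached from the i915 structures (all names here are illustrative):

    #include <linux/llist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct deferred_obj {
            struct llist_node freed;
            struct rcu_head rcu;
    };

    static LLIST_HEAD(free_list);

    static void free_rcu_cb(struct rcu_head *head)
    {
            kfree(container_of(head, struct deferred_obj, rcu));
    }

    static void free_worker(struct work_struct *work)
    {
            struct deferred_obj *obj, *on;

            /* Atomically take ownership of everything queued so far. */
            llist_for_each_entry_safe(obj, on, llist_del_all(&free_list), freed)
                    call_rcu(&obj->rcu, free_rcu_cb);
    }

    static DECLARE_WORK(free_work, free_worker);

    static void queue_free(struct deferred_obj *obj)
    {
            /* llist_add() returns true only for the first entry, so the
             * worker is scheduled once per batch of frees. */
            if (llist_add(&obj->freed, &free_list))
                    queue_work(system_wq, &free_work);
    }
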
475 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
480 front = i915_gem_object_get_frontbuffer(obj);
487 void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
492 front = i915_gem_object_get_frontbuffer(obj);
500 i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
505 src_map = kmap_local_page(i915_gem_object_get_page(obj, idx));
507 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
515 i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
518 dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
522 src_map = io_mapping_map_wc(&obj->mm.region->iomap,
523 dma - obj->mm.region->region.start,
533 static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
535 GEM_BUG_ON(!i915_gem_object_has_iomem(obj));
537 if (IS_DGFX(to_i915(obj->base.dev)))
538 return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource);
545 * @obj: GEM object to read from
550 * Reads data from @obj at the specified offset. The requested region to read
551 * from can't cross a page boundary. The caller must ensure that @obj pages
552 * are pinned and that @obj is synced wrt. any related writes.
554 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
557 int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
560 GEM_BUG_ON(offset >= obj->base.size);
562 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
564 if (i915_gem_object_has_struct_page(obj))
565 i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
566 else if (i915_gem_object_has_iomem(obj) && object_has_mappable_iomem(obj))
567 i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
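
The kerneldoc above spells out the contract for i915_gem_object_read_from_page(): the region may not cross a page boundary, the pages must already be pinned, and the object must be idle with respect to any related writes; internally the helper takes the kmap path for struct-page backed objects, the io_mapping path for mappable iomem, and otherwise returns -ENODEV. A sketch of a caller honouring that contract (the wrapper function is illustrative):

    /* Read one 8-byte value out of an otherwise idle object. */
    static int read_u64_sketch(struct drm_i915_gem_object *obj, u64 offset, u64 *val)
    {
            int err;

            /* Contract: the read may not straddle a page boundary. */
            if (offset_in_page(offset) + sizeof(*val) > PAGE_SIZE)
                    return -EINVAL;

            err = i915_gem_object_pin_pages_unlocked(obj);
            if (err)
                    return err;

            err = i915_gem_object_read_from_page(obj, offset, val, sizeof(*val));

            i915_gem_object_unpin_pages(obj);
            return err;
    }
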
576 * @obj: The object to check
587 bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
590 int pin_count = atomic_read(&obj->mm.pages_pin_count);
595 spin_lock(&obj->vma.lock);
596 list_for_each_entry(vma, &obj->vma.list, obj_link) {
598 spin_unlock(&obj->vma.lock);
604 spin_unlock(&obj->vma.lock);
613 * @obj: Pointer to the object.
618 bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
620 struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
625 return obj->mm.n_placements > 1;
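
i915_gem_object_evictable() checks that every page pin on the object is accounted for by a VMA pin, i.e. that unbinding the VMAs would leave the pages free to be dropped, while i915_gem_object_migratable() simply asks whether the object has a memory region and more than one allowed placement. A small sketch of the kind of early-out a shrinker- or migration-like path might use (the surrounding function is hypothetical):

    /* Objects whose pages are pinned outside their vmas, or that have a
     * single fixed placement, cannot be moved at all. */
    if (!i915_gem_object_evictable(obj) || !i915_gem_object_migratable(obj))
            return -EBUSY;
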
630 * @obj: The object to query.
637 bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
640 if (IS_DGFX(to_i915(obj->base.dev)) &&
641 i915_gem_object_evictable((void __force *)obj))
642 assert_object_held_shared(obj);
644 return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
649 * @obj: The object to query.
656 bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
659 if (IS_DGFX(to_i915(obj->base.dev)) &&
660 i915_gem_object_evictable((void __force *)obj))
661 assert_object_held_shared(obj);
663 return obj->mem_flags & I915_BO_FLAG_IOMEM;
669 * @obj: The object to migrate
683 bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
686 struct drm_i915_private *i915 = to_i915(obj->base.dev);
687 unsigned int num_allowed = obj->mm.n_placements;
692 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
698 if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
701 if (obj->mm.region == mr)
704 if (!i915_gem_object_evictable(obj))
707 if (!obj->ops->migrate)
710 if (!(obj->flags & I915_BO_ALLOC_USER))
717 if (mr == obj->mm.placements[i])
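
i915_gem_object_can_migrate() is the pure query behind the migrate call: the object's size must be aligned to the target region's min_page_size, the object must be evictable and have an ops->migrate hook, and user-allocated objects (I915_BO_ALLOC_USER) must list the region among their placements. A short sketch of the check-then-migrate ordering; the full locked flow is sketched after __i915_gem_object_migrate() below:

    /* Ask first, rather than having the migrate call fail later. */
    if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
            return -EINVAL;
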
726 * @obj: The object to migrate.
749 int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
753 return __i915_gem_object_migrate(obj, ww, id, obj->flags);
759 * @obj: The object to migrate.
763 * @flags: The object flags. Normally just obj->flags.
783 int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
788 struct drm_i915_private *i915 = to_i915(obj->base.dev);
792 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
793 assert_object_held(obj);
798 if (!i915_gem_object_can_migrate(obj, id))
801 if (!obj->ops->migrate) {
802 if (GEM_WARN_ON(obj->mm.region != mr))
807 return obj->ops->migrate(obj, mr, flags);
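
__i915_gem_object_migrate() (and the i915_gem_object_migrate() wrapper, which simply passes obj->flags) must be called with the object lock held; the move is typically completed by a later page pin or by waiting for the moving fence. A hedged sketch of the usual ww-transaction shape, assuming the for_i915_gem_ww() helper and the INTEL_REGION_SMEM region id; treat it as an illustration of the calling convention, not the canonical upstream sequence:

    struct i915_gem_ww_ctx ww;
    int err;

    for_i915_gem_ww(&ww, err, true) {
            err = i915_gem_object_lock(obj, &ww);
            if (err)
                    continue;

            err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
            if (err)
                    continue;

            /* Pinning the pages forces the queued move to complete. */
            err = i915_gem_object_pin_pages(obj);
    }
    if (err)
            return err;
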
813 * @obj: Pointer to the object
818 bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
823 if (!obj->mm.n_placements) {
826 return i915_gem_object_has_iomem(obj);
828 return i915_gem_object_has_pages(obj);
836 for (i = 0; i < obj->mm.n_placements; i++) {
837 if (obj->mm.placements[i]->type == type)
849 * @obj: Pointer to the object
853 bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
858 if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
861 if (obj->flags & I915_BO_ALLOC_CCS_AUX)
864 for (i = 0; i < obj->mm.n_placements; i++) {
866 if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
869 obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
903 * @obj: The object whose moving fence to get.
912 int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
915 return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
921 * @obj: The object whose moving fence to wait for.
931 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
936 assert_object_held(obj);
938 ret = dma_resv_wait_timeout(obj->base. resv, DMA_RESV_USAGE_KERNEL,
942 else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
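
The moving fence serialises CPU access against a pending move: i915_gem_object_get_moving_fence() peels the DMA_RESV_USAGE_KERNEL singleton off the reservation object, and i915_gem_object_wait_moving_fence() waits for it under the object lock, reporting an error when a failed move left the object in an unknown state. A sketch of the blocking variant, assuming the (obj, bool intr) signature shown above; the wrapper function is illustrative:

    static int wait_for_move_sketch(struct drm_i915_gem_object *obj)
    {
            int err;

            err = i915_gem_object_lock_interruptible(obj, NULL);
            if (err)
                    return err;

            /* Interruptible wait for any kernel (move) fence to signal. */
            err = i915_gem_object_wait_moving_fence(obj, true);

            i915_gem_object_unlock(obj);
            return err;
    }
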
956 bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
964 return obj->mm.unknown_state;