Lines matching defs:obj. Each entry below is prefixed with its line number in the matched source file; the definitions shown are the i915 GEM backing-store helpers (drivers/gpu/drm/i915/gem/i915_gem_pages.c in the Linux kernel).

19 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
22 struct drm_i915_private *i915 = to_i915(obj->base.dev);
27 assert_object_held_shared(obj);
29 if (i915_gem_object_is_volatile(obj))
30 obj->mm.madv = I915_MADV_DONTNEED;
33 if (obj->cache_dirty) {
35 obj->write_domain = 0;
36 if (i915_gem_object_has_struct_page(obj))
38 obj->cache_dirty = false;
41 obj->mm.get_page.sg_pos = pages->sgl;
42 obj->mm.get_page.sg_idx = 0;
43 obj->mm.get_dma_page.sg_pos = pages->sgl;
44 obj->mm.get_dma_page.sg_idx = 0;
46 obj->mm.pages = pages;
48 obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
49 GEM_BUG_ON(!obj->mm.page_sizes.phys);
59 obj->mm.page_sizes.sg = 0;
61 if (obj->mm.page_sizes.phys & ~0u << i)
62 obj->mm.page_sizes.sg |= BIT(i);
64 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
66 shrinkable = i915_gem_object_is_shrinkable(obj);
68 if (i915_gem_object_is_tiled(obj) &&
70 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
71 i915_gem_object_set_tiling_quirk(obj);
72 GEM_BUG_ON(!list_empty(&obj->mm.link));
73 atomic_inc(&obj->mm.shrink_pin);
77 if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
81 assert_object_held(obj);
85 i915->mm.shrink_memory += obj->base.size;
87 if (obj->mm.madv != I915_MADV_WILLNEED)
91 list_add_tail(&obj->mm.link, list);
93 atomic_set(&obj->mm.shrink_pin, 0);
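
In __i915_gem_object_set_pages() above, page_sizes.sg is derived from page_sizes.phys: a supported GTT page size BIT(i) is kept only when the backing store contains at least one physical chunk of BIT(i) bytes or more, which is what the phys & (~0u << i) test checks. The following stand-alone sketch reproduces just that bit math; the supported/phys values and the SZ_* macros are illustrative, not taken from the driver.

/*
 * Illustrative sketch only: recompute page_sizes.sg from page_sizes.phys
 * the same way the loop in __i915_gem_object_set_pages() does.
 * "supported" and "phys" are made-up example values.
 */
#include <stdio.h>

#define BIT(n)  (1u << (n))
#define SZ_4K   BIT(12)
#define SZ_64K  BIT(16)
#define SZ_2M   BIT(21)

int main(void)
{
	unsigned int supported = SZ_4K | SZ_64K | SZ_2M; /* GTT page sizes the HW could use */
	unsigned int phys = SZ_4K | SZ_64K;              /* sizes actually present in the sg list */
	unsigned int sg = 0;

	for (unsigned int i = 0; i < 32; i++) {
		if (!(supported & BIT(i)))
			continue;                        /* stand-in for for_each_set_bit() */
		/* keep BIT(i) if some physical chunk is at least that large */
		if (phys & ~0u << i)
			sg |= BIT(i);
	}

	printf("sg = %#x\n", sg);                        /* prints 0x11000: SZ_4K | SZ_64K */
	return 0;
}

Built with a plain C compiler this prints sg = 0x11000: the 4K and 64K sizes survive, while 2M is dropped because no physical chunk of at least 2M exists.
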
98 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
100 struct drm_i915_private *i915 = to_i915(obj->base.dev);
103 assert_object_held_shared(obj);
105 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
111 err = obj->ops->get_pages(obj);
112 GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
124 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
128 assert_object_held(obj);
130 assert_object_held_shared(obj);
132 if (unlikely(!i915_gem_object_has_pages(obj))) {
133 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
135 err = ____i915_gem_object_get_pages(obj);
141 atomic_inc(&obj->mm.pages_pin_count);
146 int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
153 err = i915_gem_object_lock(obj, &ww);
155 err = i915_gem_object_pin_pages(obj);
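
i915_gem_object_pin_pages_unlocked() above wraps the lock-then-pin sequence (i915_gem_object_lock() under a ww context, then i915_gem_object_pin_pages(), with retry on ww contention) so a caller that holds no GEM locks can keep the backing store resident with a single call. A minimal caller sketch, assuming the usual i915 GEM object header is available; my_use_of_backing_store() and my_touch_pages() are hypothetical placeholders.

/* Sketch only: keep the backing pages resident across some CPU-side work. */
#include "gem/i915_gem_object.h"	/* assumed include path from within the i915 driver */

static void my_use_of_backing_store(struct drm_i915_gem_object *obj);	/* hypothetical helper */

static int my_touch_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_pin_pages_unlocked(obj);	/* may sleep; takes the object lock internally */
	if (err)
		return err;

	my_use_of_backing_store(obj);		/* pages cannot be reaped while pinned */

	i915_gem_object_unpin_pages(obj);	/* drop the pin taken above */
	return 0;
}
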
167 int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
169 if (obj->ops->truncate)
170 return obj->ops->truncate(obj);
175 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
181 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
182 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
183 radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
184 radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
188 static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
194 static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
196 struct drm_i915_private *i915 = to_i915(obj->base.dev);
201 if (!obj->mm.tlb[id])
204 intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
205 obj->mm.tlb[id] = 0;
210 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
214 assert_object_held_shared(obj);
216 pages = fetch_and_zero(&obj->mm.pages);
220 if (i915_gem_object_is_volatile(obj))
221 obj->mm.madv = I915_MADV_WILLNEED;
223 if (!i915_gem_object_has_self_managed_shrink_list(obj))
224 i915_gem_object_make_unshrinkable(obj);
226 if (obj->mm.mapping) {
227 unmap_object(obj, page_mask_bits(obj->mm.mapping));
228 obj->mm.mapping = NULL;
231 __i915_gem_object_reset_page_iter(obj);
232 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
234 flush_tlb_invalidate(obj);
239 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
243 if (i915_gem_object_has_pinned_pages(obj))
247 assert_object_held_shared(obj);
249 i915_gem_object_release_mmap_offset(obj);
256 pages = __i915_gem_object_unset_pages(obj);
265 obj->ops->put_pages(obj, pages);
271 static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
274 unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
302 if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
303 return page_address(sg_page(obj->mm.pages->sgl));
319 for_each_sgt_page(page, iter, obj->mm.pages)
328 static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
331 resource_size_t iomap = obj->mm.region->iomap.base -
332 obj->mm.region->region.start;
333 unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
349 for_each_sgt_daddr(addr, iter, obj->mm.pages)
359 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
367 if (!i915_gem_object_has_struct_page(obj) &&
368 !i915_gem_object_has_iomem(obj))
371 if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
374 assert_object_held(obj);
379 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
380 if (unlikely(!i915_gem_object_has_pages(obj))) {
381 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
383 err = ____i915_gem_object_get_pages(obj);
389 atomic_inc(&obj->mm.pages_pin_count);
392 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
411 if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
412 if (type != I915_MAP_WC && !obj->mm.n_placements) {
418 } else if (IS_DGFX(to_i915(obj->base.dev))) {
422 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
429 unmap_object(obj, ptr);
431 ptr = obj->mm.mapping = NULL;
435 err = i915_gem_object_wait_moving_fence(obj, true);
443 else if (i915_gem_object_has_struct_page(obj))
444 ptr = i915_gem_object_map_page(obj, type);
446 ptr = i915_gem_object_map_pfn(obj, type);
450 obj->mm.mapping = page_pack_bits(ptr, type);
456 atomic_dec(&obj->mm.pages_pin_count);
460 void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
465 i915_gem_object_lock(obj, NULL);
466 ret = i915_gem_object_pin_map(obj, type);
467 i915_gem_object_unlock(obj);
472 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
479 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
480 GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
481 offset, size, obj->base.size));
484 obj->mm.dirty = true;
486 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
489 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
494 if (size == obj->base.size) {
495 obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
496 obj->cache_dirty = false;
500 void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
502 GEM_BUG_ON(!obj->mm.mapping);
510 unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
512 i915_gem_object_unpin_map(obj);
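
The mapping helpers listed above form a simple lifecycle: i915_gem_object_pin_map_unlocked() pins the pages and returns a kernel virtual address, __i915_gem_object_flush_map() flushes CPU writes (and, when the flushed range covers the whole object, clears the CPU write domain), and i915_gem_object_unpin_map() drops the mapping reference. A minimal sketch of that sequence, assuming the object tolerates a write-back (I915_MAP_WB) mapping; my_clear_object() is a hypothetical caller.

/* Sketch only: map, write, flush, unmap. */
#include <linux/err.h>
#include <linux/string.h>
#include "gem/i915_gem_object.h"	/* assumed include path from within the i915 driver */

static int my_clear_object(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);	/* mapping type may be refused for some placements */

	memset(vaddr, 0, obj->base.size);

	/* flush the CPU writes; covering the full object also clears the CPU write domain */
	__i915_gem_object_flush_map(obj, 0, obj->base.size);

	i915_gem_object_unpin_map(obj);	/* releases the pin taken by pin_map_unlocked() */
	return 0;
}
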
516 __i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
522 const bool dma = iter == &obj->mm.get_dma_page ||
523 iter == &obj->ttm.get_io_page;
528 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
529 if (!i915_gem_object_has_pinned_pages(obj))
530 assert_object_held(obj);
538 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
633 __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
638 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
640 sg = i915_gem_object_get_sg(obj, n, &offset);
646 __i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
650 page = i915_gem_object_get_page(obj, n);
651 if (!obj->mm.dirty)
658 __i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
664 sg = i915_gem_object_get_sg_dma(obj, n, &offset);
673 __i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
675 return i915_gem_object_get_dma_address_len(obj, n, NULL);
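
The final group of helpers resolves a page index into the underlying struct page or DMA address by walking the cached scatterlist iterators; per the comment fragment above, repeated lookups are O(lg(obj->base.size)). A small sketch of resolving both views of one index, assuming the caller already holds a pages pin and the object is struct-page backed; my_inspect_page() and its pr_info() message are illustrative only.

/* Sketch only: look up both views (struct page and DMA address) of page n. */
#include <linux/mm.h>
#include <linux/printk.h>
#include "gem/i915_gem_object.h"	/* assumed include path from within the i915 driver */

static void my_inspect_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	/* caller must hold a pages pin so the backing store stays put */
	struct page *page = i915_gem_object_get_page(obj, n);	/* struct-page backed objects only */
	dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);

	pr_info("page %lu: pfn %#lx, dma %pad\n", n, page_to_pfn(page), &addr);
}
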