Lines matching refs: vma

48 static inline void assert_vma_held_evict(const struct i915_vma *vma)
55 if (kref_read(&vma->vm->ref))
56 assert_object_held_shared(vma->obj);
66 static void i915_vma_free(struct i915_vma *vma)
68 return kmem_cache_free(slab_vmas, vma);
75 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
79 if (!vma->node.stack) {
80 drm_dbg(vma->obj->base.dev,
81 "vma.node [%08llx + %08llx] %s: unknown owner\n",
82 vma->node.start, vma->node.size, reason);
86 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
87 drm_dbg(vma->obj->base.dev,
88 "vma.node [%08llx + %08llx] %s: inserted at %s\n",
89 vma->node.start, vma->node.size, reason, buf);
94 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
107 struct i915_vma *vma = active_to_vma(ref);
109 if (!i915_vma_tryget(vma))
116 if (!i915_vma_is_ggtt(vma)) {
124 intel_gt_pm_get_untracked(vma->vm->gt);
132 struct i915_vma *vma = active_to_vma(ref);
134 if (!i915_vma_is_ggtt(vma)) {
139 intel_gt_pm_put_async_untracked(vma->vm->gt);
142 i915_vma_put(vma);
151 struct i915_vma *vma;
158 vma = i915_vma_alloc();
159 if (vma == NULL)
162 vma->ops = &vm->vma_ops;
163 vma->obj = obj;
164 vma->size = obj->base.size;
165 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
167 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
172 might_lock(&vma->active.mutex);
176 INIT_LIST_HEAD(&vma->closed_link);
177 INIT_LIST_HEAD(&vma->obj_link);
178 RB_CLEAR_NODE(&vma->obj_node);
181 vma->gtt_view = *view;
187 vma->size = view->partial.size;
188 vma->size <<= PAGE_SHIFT;
189 GEM_BUG_ON(vma->size > obj->base.size);
191 vma->size = intel_rotation_info_size(&view->rotated);
192 vma->size <<= PAGE_SHIFT;
194 vma->size = intel_remapped_info_size(&view->remapped);
195 vma->size <<= PAGE_SHIFT;
199 if (unlikely(vma->size > vm->total))
202 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
210 vma->vm = vm;
211 list_add_tail(&vma->vm_link, &vm->unbound_list);
213 spin_lock(&obj->vma.lock);
215 if (unlikely(overflows_type(vma->size, u32)))
218 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
221 if (unlikely(vma->fence_size < vma->size || /* overflow */
222 vma->fence_size > vm->total))
225 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
227 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
230 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
232 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
236 p = &obj->vma.tree.rb_node;
245 * already created a matching vma, so return the older instance
256 rb_link_node(&vma->obj_node, rb, p);
257 rb_insert_color(&vma->obj_node, &obj->vma.tree);
259 if (i915_vma_is_ggtt(vma))
261 * We put the GGTT vma at the start of the vma-list, followed
262 * by the ppGGTT vma. This allows us to break early when
263 * iterating over only the GGTT vma for an object, see
266 list_add(&vma->obj_link, &obj->vma.list);
268 list_add_tail(&vma->obj_link, &obj->vma.list);
270 spin_unlock(&obj->vma.lock);
273 return vma;
276 spin_unlock(&obj->vma.lock);
277 list_del_init(&vma->vm_link);
280 i915_vma_free(vma);
291 rb = obj->vma.tree.rb_node;
293 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
296 cmp = i915_vma_compare(vma, vm, view);
298 return vma;
320 * Returns the vma, or an error pointer.
327 struct i915_vma *vma;
332 spin_lock(&obj->vma.lock);
333 vma = i915_vma_lookup(obj, vm, view);
334 spin_unlock(&obj->vma.lock);
336 /* vma_create() will resolve the race if another creates the vma */
337 if (unlikely(!vma))
338 vma = vma_create(obj, vm, view);
340 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
341 return vma;
405 int i915_vma_wait_for_bind(struct i915_vma *vma)
409 if (rcu_access_pointer(vma->active.excl.fence)) {
413 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
425 static int i915_vma_verify_bind_complete(struct i915_vma *vma)
427 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
448 struct i915_vma *vma)
450 struct drm_i915_gem_object *obj = vma->obj;
452 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
455 vma->ops, vma->private, __i915_vma_offset(vma),
456 __i915_vma_size(vma), vma->size, vma->guard);
461 * @vma: VMA to map
465 * @vma_res: pointer to a preallocated vma resource. The resource is either
472 int i915_vma_bind(struct i915_vma *vma,
482 lockdep_assert_held(&vma->vm->mutex);
483 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
484 GEM_BUG_ON(vma->size > i915_vma_size(vma));
486 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
487 vma->node.size,
488 vma->vm->total))) {
501 vma_flags = atomic_read(&vma->flags);
510 GEM_BUG_ON(!atomic_read(&vma->pages_count));
513 if (work && bind_flags & vma->vm->bind_async_flags)
514 ret = i915_vma_resource_bind_dep_await(vma->vm,
516 vma->node.start,
517 vma->node.size,
523 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
524 vma->node.size, true);
530 if (vma->resource || !vma_res) {
535 i915_vma_resource_init_from_vma(vma_res, vma);
536 vma->resource = vma_res;
538 trace_i915_vma_bind(vma, bind_flags);
539 if (work && bind_flags & vma->vm->bind_async_flags) {
542 work->vma_res = i915_vma_resource_get(vma->resource);
551 * Also note that we do not want to track the async vma as
555 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
564 work->obj = i915_gem_object_get(vma->obj);
566 ret = i915_gem_object_wait_moving_fence(vma->obj, true);
568 i915_vma_resource_free(vma->resource);
569 vma->resource = NULL;
573 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
577 atomic_or(bind_flags, &vma->flags);
581 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
586 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
589 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
590 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
591 GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
593 ptr = READ_ONCE(vma->iomap);
601 if (i915_gem_object_is_lmem(vma->obj)) {
602 ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
603 vma->obj->base.size);
604 } else if (i915_vma_is_map_and_fenceable(vma)) {
605 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
606 i915_vma_offset(vma),
607 i915_vma_size(vma));
610 i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
623 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
625 __i915_gem_object_release_map(vma->obj);
628 ptr = vma->iomap;
632 __i915_vma_pin(vma);
634 err = i915_vma_pin_fence(vma);
638 i915_vma_set_ggtt_write(vma);
644 __i915_vma_unpin(vma);
649 void i915_vma_flush_writes(struct i915_vma *vma)
651 if (i915_vma_unset_ggtt_write(vma))
652 intel_gt_flush_ggtt_writes(vma->vm->gt);
655 void i915_vma_unpin_iomap(struct i915_vma *vma)
657 GEM_BUG_ON(vma->iomap == NULL);
661 i915_vma_flush_writes(vma);
663 i915_vma_unpin_fence(vma);
664 i915_vma_unpin(vma);
669 struct i915_vma *vma;
672 vma = fetch_and_zero(p_vma);
673 if (!vma)
676 obj = vma->obj;
679 i915_vma_unpin(vma);
687 bool i915_vma_misplaced(const struct i915_vma *vma,
690 if (!drm_mm_node_allocated(&vma->node))
693 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
696 if (i915_vma_size(vma) < size)
700 if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
703 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
707 i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
711 i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
715 vma->guard < (flags & PIN_OFFSET_MASK))
721 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
725 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
726 GEM_BUG_ON(!vma->fence_size);
728 fenceable = (i915_vma_size(vma) >= vma->fence_size &&
729 IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
731 mappable = i915_ggtt_offset(vma) + vma->fence_size <=
732 i915_vm_to_ggtt(vma->vm)->mappable_end;
735 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
737 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
740 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
742 struct drm_mm_node *node = &vma->node;
752 if (!i915_vm_has_cache_coloring(vma->vm))
755 /* Only valid to be called on an already inserted vma */
773 * i915_vma_insert - finds a slot for the vma in its address space
774 * @vma: the vma
788 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
795 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
796 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
799 size = max(size, vma->size);
800 alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
802 size = max_t(typeof(size), size, vma->fence_size);
804 alignment, vma->fence_alignment);
811 guard = vma->guard; /* retain guard across rebinds */
826 end = vma->vm->total;
828 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
833 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
841 drm_dbg(vma->obj->base.dev,
849 if (i915_vm_has_cache_coloring(vma->vm))
850 color = vma->obj->pat_index;
861 * of the vma->node due to the guard pages.
866 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
883 vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
884 !HAS_64K_PAGES(vma->vm->i915)) {
892 rounddown_pow_of_two(vma->page_sizes.sg |
900 GEM_BUG_ON(i915_vma_is_ggtt(vma));
904 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
908 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
914 GEM_BUG_ON(vma->node.start < start);
915 GEM_BUG_ON(vma->node.start + vma->node.size > end);
917 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
918 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
920 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
921 vma->guard = guard;
927 i915_vma_detach(struct i915_vma *vma)
929 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
930 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
934 * vma, we can drop its hold on the backing storage and allow
937 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
940 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
944 bound = atomic_read(&vma->flags);
962 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
1300 __i915_vma_get_pages(struct i915_vma *vma)
1305 * The vma->pages are only valid within the lifespan of the borrowed
1307 * must be the vma->pages. A simple rule is that vma->pages must only
1310 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1312 switch (vma->gtt_view.type) {
1314 GEM_BUG_ON(vma->gtt_view.type);
1317 pages = vma->obj->mm.pages;
1322 intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
1327 intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
1331 pages = intel_partial_pages(&vma->gtt_view, vma->obj);
1336 drm_err(&vma->vm->i915->drm,
1338 vma->gtt_view.type, PTR_ERR(pages));
1342 vma->pages = pages;
1347 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
1351 if (atomic_add_unless(&vma->pages_count, 1, 0))
1354 err = i915_gem_object_pin_pages(vma->obj);
1358 err = __i915_vma_get_pages(vma);
1362 vma->page_sizes = vma->obj->mm.page_sizes;
1363 atomic_inc(&vma->pages_count);
1368 __i915_gem_object_unpin_pages(vma->obj);
1382 * Before we release the pages that were bound by this vma, we
1394 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
1397 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
1399 if (atomic_sub_return(count, &vma->pages_count) == 0) {
1400 if (vma->pages != vma->obj->mm.pages) {
1401 sg_free_table(vma->pages);
1402 kfree(vma->pages);
1404 vma->pages = NULL;
1406 i915_gem_object_unpin_pages(vma->obj);
1410 I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
1412 if (atomic_add_unless(&vma->pages_count, -1, 1))
1415 __vma_put_pages(vma, 1);
1418 static void vma_unbind_pages(struct i915_vma *vma)
1422 lockdep_assert_held(&vma->vm->mutex);
1425 count = atomic_read(&vma->pages_count);
1429 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
1432 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1442 assert_vma_held(vma);
1450 /* First try and grab the pin without rebinding the vma */
1451 if (try_qad_pin(vma, flags))
1454 err = i915_vma_get_pages(vma);
1465 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1467 if (flags & vma->vm->bind_async_flags) {
1469 err = i915_vm_lock_objects(vma->vm, ww);
1479 work->vm = vma->vm;
1481 err = i915_gem_object_get_moving_fence(vma->obj, &moving);
1488 if (vma->vm->allocate_va_range) {
1489 err = i915_vm_alloc_pt_stash(vma->vm,
1491 vma->size);
1495 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1508 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1510 * We conflate the Global GTT with the user's vma when using the
1524 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1531 if (unlikely(i915_vma_is_closed(vma))) {
1536 bound = atomic_read(&vma->flags);
1549 __i915_vma_pin(vma);
1553 err = i915_active_acquire(&vma->active);
1558 err = i915_vma_insert(vma, ww, size, alignment, flags);
1562 if (i915_is_ggtt(vma->vm))
1563 __i915_vma_set_map_and_fenceable(vma);
1566 GEM_BUG_ON(!vma->pages);
1567 err = i915_vma_bind(vma,
1568 vma->obj->pat_index,
1576 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1577 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1580 __i915_vma_pin(vma);
1581 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1583 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1584 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1587 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1588 i915_vma_detach(vma);
1589 drm_mm_remove_node(&vma->node);
1592 i915_active_release(&vma->active);
1594 mutex_unlock(&vma->vm->mutex);
1601 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1606 i915_vma_put_pages(vma);
1621 static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1624 struct i915_address_space *vm = vma->vm;
1630 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1634 err = i915_vma_wait_for_bind(vma);
1636 i915_vma_unpin(vma);
1656 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1662 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1665 return __i915_ggtt_pin(vma, ww, align, flags);
1667 lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
1670 err = i915_gem_object_lock(vma->obj, &_ww);
1672 err = __i915_ggtt_pin(vma, &_ww, align, flags);
1688 struct i915_vma *vma;
1690 spin_lock(&obj->vma.lock);
1691 for_each_ggtt_vma(vma, obj) {
1692 i915_vma_clear_scanout(vma);
1693 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
1695 spin_unlock(&obj->vma.lock);
1698 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1712 GEM_BUG_ON(i915_vma_is_closed(vma));
1713 list_add(&vma->closed_link, &gt->closed_vma);
1716 void i915_vma_close(struct i915_vma *vma)
1718 struct intel_gt *gt = vma->vm->gt;
1721 if (i915_vma_is_ggtt(vma))
1724 GEM_BUG_ON(!atomic_read(&vma->open_count));
1725 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1728 __vma_close(vma, gt);
1733 static void __i915_vma_remove_closed(struct i915_vma *vma)
1735 list_del_init(&vma->closed_link);
1738 void i915_vma_reopen(struct i915_vma *vma)
1740 struct intel_gt *gt = vma->vm->gt;
1743 if (i915_vma_is_closed(vma))
1744 __i915_vma_remove_closed(vma);
1748 static void force_unbind(struct i915_vma *vma)
1750 if (!drm_mm_node_allocated(&vma->node))
1753 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1754 WARN_ON(__i915_vma_unbind(vma));
1755 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1758 static void release_references(struct i915_vma *vma, struct intel_gt *gt,
1761 struct drm_i915_gem_object *obj = vma->obj;
1763 GEM_BUG_ON(i915_vma_is_active(vma));
1765 spin_lock(&obj->vma.lock);
1766 list_del(&vma->obj_link);
1767 if (!RB_EMPTY_NODE(&vma->obj_node))
1768 rb_erase(&vma->obj_node, &obj->vma.tree);
1770 spin_unlock(&obj->vma.lock);
1773 __i915_vma_remove_closed(vma);
1777 i915_vm_resv_put(vma->vm);
1780 i915_active_wait(&vma->active);
1781 i915_active_fini(&vma->active);
1782 GEM_WARN_ON(vma->resource);
1783 i915_vma_free(vma);
1787 * i915_vma_destroy_locked - Remove all weak reference to the vma and put
1790 * This function should be called when it's decided the vma isn't needed
1803 * vma freeing from __i915_gem_object_pages_fini().
1805 * Because of locks taken during destruction, a vma is also guaranteed to
1809 * - obj->vma.lock
1812 void i915_vma_destroy_locked(struct i915_vma *vma)
1814 lockdep_assert_held(&vma->vm->mutex);
1816 force_unbind(vma);
1817 list_del_init(&vma->vm_link);
1818 release_references(vma, vma->vm->gt, false);
1821 void i915_vma_destroy(struct i915_vma *vma)
1826 mutex_lock(&vma->vm->mutex);
1827 force_unbind(vma);
1828 list_del_init(&vma->vm_link);
1829 vm_ddestroy = vma->vm_ddestroy;
1830 vma->vm_ddestroy = false;
1832 /* vma->vm may be freed when releasing vma->vm->mutex. */
1833 gt = vma->vm->gt;
1834 mutex_unlock(&vma->vm->mutex);
1835 release_references(vma, gt, vm_ddestroy);
1840 struct i915_vma *vma, *next;
1844 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1845 struct drm_i915_gem_object *obj = vma->obj;
1846 struct i915_address_space *vm = vma->vm;
1858 list_move(&vma->closed_link, &closed);
1862 /* As the GT is held idle, no vma can be reopened as we destroy them */
1863 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1864 struct drm_i915_gem_object *obj = vma->obj;
1865 struct i915_address_space *vm = vma->vm;
1868 INIT_LIST_HEAD(&vma->closed_link);
1869 i915_vma_destroy(vma);
1874 list_add(&vma->closed_link, &gt->closed_vma);
1883 static void __i915_vma_iounmap(struct i915_vma *vma)
1885 GEM_BUG_ON(i915_vma_is_pinned(vma));
1887 if (vma->iomap == NULL)
1890 if (page_unmask_bits(vma->iomap))
1891 __i915_gem_object_release_map(vma->obj);
1893 io_mapping_unmap(vma->iomap);
1894 vma->iomap = NULL;
1897 void i915_vma_revoke_mmap(struct i915_vma *vma)
1902 if (!i915_vma_has_userfault(vma))
1905 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1906 GEM_BUG_ON(!vma->obj->userfault_count);
1908 node = &vma->mmo->vma_node;
1909 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
1910 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1912 vma->size,
1915 i915_vma_unset_userfault(vma);
1916 if (!--vma->obj->userfault_count)
1917 list_del(&vma->obj->userfault_link);
1921 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1923 return __i915_request_await_exclusive(rq, &vma->active);
1926 static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1930 /* Wait for the vma to be bound before we start! */
1931 err = __i915_request_await_bind(rq, vma);
1935 return i915_active_add_request(&vma->active, rq);
1938 int _i915_vma_move_to_active(struct i915_vma *vma,
1943 struct drm_i915_gem_object *obj = vma->obj;
1948 GEM_BUG_ON(!vma->pages);
1951 err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
1955 err = __i915_vma_move_to_active(vma, rq);
1969 err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
2000 dma_resv_add_fence(vma->obj->base.resv, curr, usage);
2003 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
2004 i915_active_add_request(&vma->fence->active, rq);
2009 GEM_BUG_ON(!i915_vma_is_active(vma));
2013 struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
2015 struct i915_vma_resource *vma_res = vma->resource;
2018 GEM_BUG_ON(i915_vma_is_pinned(vma));
2019 assert_vma_held_evict(vma);
2021 if (i915_vma_is_map_and_fenceable(vma)) {
2023 i915_vma_revoke_mmap(vma);
2032 * bit from set-domain, as we mark all GGTT vma associated
2033 * with an object. We know this is for another vma, as we
2034 * are currently unbinding this one -- so if this vma will be
2038 i915_vma_flush_writes(vma);
2041 i915_vma_revoke_fence(vma);
2043 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
2046 __i915_vma_iounmap(vma);
2048 GEM_BUG_ON(vma->fence);
2049 GEM_BUG_ON(i915_vma_has_userfault(vma));
2052 GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
2055 vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
2056 kref_read(&vma->vm->ref);
2057 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
2058 vma->vm->skip_pte_rewrite;
2059 trace_i915_vma_unbind(vma);
2063 vma->obj->mm.tlb);
2067 vma->resource = NULL;
2070 &vma->flags);
2072 i915_vma_detach(vma);
2080 vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
2089 vma_unbind_pages(vma);
2093 int __i915_vma_unbind(struct i915_vma *vma)
2097 lockdep_assert_held(&vma->vm->mutex);
2098 assert_vma_held_evict(vma);
2100 if (!drm_mm_node_allocated(&vma->node))
2103 if (i915_vma_is_pinned(vma)) {
2104 vma_print_allocator(vma, "is pinned");
2109 * After confirming that no one else is pinning this vma, wait for
2113 ret = i915_vma_sync(vma);
2117 GEM_BUG_ON(i915_vma_is_active(vma));
2118 __i915_vma_evict(vma, false);
2120 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2124 static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
2128 lockdep_assert_held(&vma->vm->mutex);
2130 if (!drm_mm_node_allocated(&vma->node))
2133 if (i915_vma_is_pinned(vma) ||
2134 &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
2139 * object's dma_resv when the vma active goes away. When doing that
2142 * the next vma from the object, in case there are many, will
2146 if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
2152 fence = __i915_vma_evict(vma, true);
2154 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2159 int i915_vma_unbind(struct i915_vma *vma)
2161 struct i915_address_space *vm = vma->vm;
2165 assert_object_held_shared(vma->obj);
2168 err = i915_vma_sync(vma);
2172 if (!drm_mm_node_allocated(&vma->node))
2175 if (i915_vma_is_pinned(vma)) {
2176 vma_print_allocator(vma, "is pinned");
2180 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2184 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2188 err = __i915_vma_unbind(vma);
2197 int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
2199 struct drm_i915_gem_object *obj = vma->obj;
2200 struct i915_address_space *vm = vma->vm;
2211 if (!drm_mm_node_allocated(&vma->node))
2214 if (i915_vma_is_pinned(vma)) {
2215 vma_print_allocator(vma, "is pinned");
2231 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2243 fence = __i915_vma_unbind_async(vma);
2259 int i915_vma_unbind_unlocked(struct i915_vma *vma)
2263 i915_gem_object_lock(vma->obj, NULL);
2264 err = i915_vma_unbind(vma);
2265 i915_gem_object_unlock(vma->obj);
2270 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
2272 i915_gem_object_make_unshrinkable(vma->obj);
2273 return vma;
2276 void i915_vma_make_shrinkable(struct i915_vma *vma)
2278 i915_gem_object_make_shrinkable(vma->obj);
2281 void i915_vma_make_purgeable(struct i915_vma *vma)
2283 i915_gem_object_make_purgeable(vma->obj);