Lines matching refs: vm

51 	 * We may be forced to unbind when the vm is dead, to clean it up.
55 if (kref_read(&vma->vm->ref))
124 intel_gt_pm_get_untracked(vma->vm->gt);
139 intel_gt_pm_put_async_untracked(vma->vm->gt);
147 struct i915_address_space *vm,
156 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
162 vma->ops = &vm->vma_ops;
199 if (unlikely(vma->size > vm->total))
204 err = mutex_lock_interruptible(&vm->mutex);
210 vma->vm = vm;
211 list_add_tail(&vma->vm_link, &vm->unbound_list);
214 if (i915_is_ggtt(vm)) {
218 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
222 vma->fence_size > vm->total))
227 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
248 cmp = i915_vma_compare(pos, vm, view);
271 mutex_unlock(&vm->mutex);
278 mutex_unlock(&vm->mutex);
286 struct i915_address_space *vm,
296 cmp = i915_vma_compare(vma, vm, view);
312 * @vm: address space in which the mapping is located
315 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
324 struct i915_address_space *vm,
329 GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
330 GEM_BUG_ON(!kref_read(&vm->ref));
333 vma = i915_vma_lookup(obj, vm, view);
338 vma = vma_create(obj, vm, view);
340 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
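
Note: the matches around lines 312-340 are the i915_vma_instance() lookup-or-create path: an existing VMA for @obj in @vm is returned when one matches the requested view, otherwise vma_create() builds a new one. As a rough usage sketch only (the obj and ggtt pointers and the error handling are assumed here, not taken from these matches):

	struct i915_vma *vma;
	int err;

	/* Look up, or lazily create, the VMA for obj in the global GTT. */
	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Pin (and thereby bind) it into the address space before use. */
	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		return err;
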
346 struct i915_address_space *vm;
369 vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
380 i915_vm_free_pt_stash(vw->vm, &vw->stash);
452 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
482 lockdep_assert_held(&vma->vm->mutex);
488 vma->vm->total))) {
513 if (work && bind_flags & vma->vm->bind_async_flags)
514 ret = i915_vma_resource_bind_dep_await(vma->vm,
523 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
539 if (work && bind_flags & vma->vm->bind_async_flags) {
573 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
605 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
652 intel_gt_flush_ggtt_writes(vma->vm->gt);
732 i915_vm_to_ggtt(vma->vm)->mappable_end;
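
Note: the matches at lines 605-732 sit in the GGTT iomap/aperture paths (io_mapping_map_wc() for the mapping, the GGTT write flush, and the mappable-end check). For context, a hedged usage sketch of the pin_iomap/unpin_iomap helper pair, assuming the vma is already pinned into the mappable aperture:

	void __iomem *ptr;

	/* Map the vma's GGTT range write-combined through the aperture. */
	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* CPU writes through ptr reach the object via the aperture. */
	memset_io(ptr, 0, vma->size);

	i915_vma_unpin_iomap(vma);
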
748 * domains and dying. During vm initialisation, we decide whether or not
752 if (!i915_vm_has_cache_coloring(vma->vm))
826 end = vma->vm->total;
828 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
833 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
849 if (i915_vm_has_cache_coloring(vma->vm))
866 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
884 !HAS_64K_PAGES(vma->vm->i915)) {
908 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
920 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
937 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
1336 drm_err(&vma->vm->i915->drm,
1373 void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
1389 for_each_gt(gt, vm->i915, id)
1422 lockdep_assert_held(&vma->vm->mutex);
1463 * vm->mutex, get the first rpm wakeref outside of the mutex.
1465 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1467 if (flags & vma->vm->bind_async_flags) {
1469 err = i915_vm_lock_objects(vma->vm, ww);
1479 work->vm = vma->vm;
1488 if (vma->vm->allocate_va_range) {
1489 err = i915_vm_alloc_pt_stash(vma->vm,
1495 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1524 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1529 /* No more allocations allowed now we hold vm->mutex */
1562 if (i915_is_ggtt(vma->vm))
1577 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1594 mutex_unlock(&vma->vm->mutex);
1601 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
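
Note: the matches from roughly lines 1463-1601 are the pin/bind path, and the comment at line 1463 states the ordering rule: the first runtime-PM wakeref is taken outside (before) vm->mutex, since waking the device must not happen under that lock, and it is dropped again after the mutex is released. A minimal sketch of that ordering, with the bind work itself elided (the nesting subclass is written as !wakeref as in the unbind match at line 2182; the pin path's own subclass argument is truncated in the listing above):

	intel_wakeref_t wakeref;
	int err;

	/* Wake the device first; runtime resume must not run under vm->mutex. */
	wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (!err) {
		/* ... reserve/insert the node and bind while holding vm->mutex ... */
		mutex_unlock(&vma->vm->mutex);
	}

	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
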
1624 struct i915_address_space *vm = vma->vm;
1626 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1644 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1650 i915_gem_evict_vm(vm, NULL, NULL);
1651 mutex_unlock(&vm->mutex);
1718 struct intel_gt *gt = vma->vm->gt;
1740 struct intel_gt *gt = vma->vm->gt;
1777 i915_vm_resv_put(vma->vm);
1797 * on the vm and a reference on the object. Also takes the object lock so
1806 * - vm->mutex
1812 lockdep_assert_held(&vma->vm->mutex);
1816 release_references(vma, vma->vm->gt, false);
1824 mutex_lock(&vma->vm->mutex);
1830 /* vma->vm may be freed when releasing vma->vm->mutex. */
1831 gt = vma->vm->gt;
1832 mutex_unlock(&vma->vm->mutex);
1844 struct i915_address_space *vm = vma->vm;
1851 if (!i915_vm_tryget(vm)) {
1863 struct i915_address_space *vm = vma->vm;
1877 i915_vm_put(vm);
1908 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
2052 /* If vm is not open, unbind is a nop. */
2054 kref_read(&vma->vm->ref);
2055 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
2056 vma->vm->skip_pte_rewrite;
2078 vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
2095 lockdep_assert_held(&vma->vm->mutex);
2109 * a residual pin skipping the vm->mutex) to complete.
2126 lockdep_assert_held(&vma->vm->mutex);
2159 struct i915_address_space *vm = vma->vm;
2180 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2182 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2187 mutex_unlock(&vm->mutex);
2191 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2198 struct i915_address_space *vm = vma->vm;
2230 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2232 if (trylock_vm && !mutex_trylock(&vm->mutex)) {
2236 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
2242 mutex_unlock(&vm->mutex);
2253 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);