Lines matching refs: vm

31 static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
33 drm_gpuvm_get(&vm->gpuvm);
34 return vm;
37 static inline void xe_vm_put(struct xe_vm *vm)
39 drm_gpuvm_put(&vm->gpuvm);
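
The pair above is plain reference counting on top of drm_gpuvm. As a minimal sketch (the example_async_op type and both example_* functions are hypothetical, not part of the header), a caller keeps a vm alive across asynchronous work like this:

/* Hypothetical context that carries a vm reference across async work. */
struct example_async_op {
	struct xe_vm *vm;
};

static void example_start_async_op(struct xe_vm *vm, struct example_async_op *op)
{
	/* xe_vm_get() takes a drm_gpuvm reference and returns vm, so it chains. */
	op->vm = xe_vm_get(vm);

	/* ... queue the work that will use op->vm ... */
}

static void example_finish_async_op(struct example_async_op *op)
{
	/* Drop the reference taken in example_start_async_op(). */
	xe_vm_put(op->vm);
}
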
42 int xe_vm_lock(struct xe_vm *vm, bool intr);
44 void xe_vm_unlock(struct xe_vm *vm);
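
xe_vm_lock() takes the vm's reservation lock (optionally interruptibly) and returns an errno on failure, while xe_vm_unlock() drops it. A minimal sketch of the usual pattern; the ioctl-style caller is an assumption:

/* Sketch: take the vm's reservation lock interruptibly, e.g. from an ioctl. */
static int example_with_vm_locked(struct xe_vm *vm)
{
	int err;

	err = xe_vm_lock(vm, true /* interruptible */);
	if (err)
		return err;	/* e.g. interrupted by a signal */

	/* ... touch state protected by the vm's reservation object ... */

	xe_vm_unlock(vm);
	return 0;
}
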
46 static inline bool xe_vm_is_closed(struct xe_vm *vm)
48 /* Only guaranteed not to change when vm->lock is held */
49 return !vm->size;
52 static inline bool xe_vm_is_banned(struct xe_vm *vm)
54 return vm->flags & XE_VM_FLAG_BANNED;
57 static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
59 lockdep_assert_held(&vm->lock);
60 return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
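
Since xe_vm_is_closed() is only stable while vm->lock is held and xe_vm_is_closed_or_banned() asserts that lock, callers are expected to take it first. A sketch of the typical check; the rwsem usage and the -ECANCELED error code are assumptions about the calling context:

/* Sketch: refuse new work on a vm that is shutting down or was banned. */
static int example_check_vm_usable(struct xe_vm *vm)
{
	int err = 0;

	down_read(&vm->lock);	/* assumed: vm->lock is a rw_semaphore */
	if (xe_vm_is_closed_or_banned(vm))
		err = -ECANCELED;	/* placeholder error code */
	up_read(&vm->lock);

	return err;
}
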
64 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
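
The lookup helper above can be used to test whether a range is already mapped. A sketch, assuming it returns a struct xe_vma pointer (its return type is on a line that does not appear in this listing) and that vm->lock must be held:

/* Sketch: check whether [addr, addr + size) overlaps an existing mapping. */
static bool example_range_is_mapped(struct xe_vm *vm, u64 addr, u64 size)
{
	lockdep_assert_held(&vm->lock);	/* assumed locking rule */

	return xe_vm_find_overlapping_vma(vm, addr, size) != NULL;
}
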
67 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
68 * @vm: The vm
70 * Return: whether the vm populates unmapped areas with scratch PTEs
72 static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
74 return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
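
A sketch of how the scratch-page flag typically gets consumed: choosing what to write into page-table entries for unmapped ranges. The scratch_pte argument stands in for a precomputed scratch PTE encoding and is an assumption of this example:

/* Sketch: pick the PTE value used for an unmapped range. */
static u64 example_unmapped_pte(const struct xe_vm *vm, u64 scratch_pte)
{
	/*
	 * With XE_VM_FLAG_SCRATCH_PAGE set, unmapped areas are backed by
	 * scratch PTEs instead of being left invalid.
	 */
	return xe_vm_has_scratch(vm) ? scratch_pte : 0;
}
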
90 return gpuvm_to_vm(gpuva->vm);
135 return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
175 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
184 void xe_vm_close_and_put(struct xe_vm *vm);
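
xe_vm_close_and_put() is the teardown counterpart to xe_vm_put(): it closes the vm so it can no longer be used and then drops the reference. A minimal sketch of a destroy path, assuming the caller owns the creation reference:

/* Sketch: tear down a vm when it is destroyed, rather than just unreferencing it. */
static void example_destroy_vm(struct xe_vm *vm)
{
	/* Close the vm and drop the reference; a bare xe_vm_put() would only unreference. */
	xe_vm_close_and_put(vm);
}
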
186 static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
188 return vm->flags & XE_VM_FLAG_FAULT_MODE;
191 static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
193 return vm->flags & XE_VM_FLAG_LR_MODE;
196 static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
198 return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
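
The three mode predicates partition vms by how their mappings are kept valid; per the definition above, a long-running (LR) vm that is not in fault mode is in preempt-fence mode. A sketch of dispatching on that, with the comments describing assumed strategies rather than quoting the driver:

/* Sketch: choose a revalidation strategy based on the vm's mode. */
static void example_dispatch_on_mode(struct xe_vm *vm)
{
	if (!xe_vm_in_lr_mode(vm)) {
		/* Default dma-fence mode: revalidate at submission time. */
	} else if (xe_vm_in_fault_mode(vm)) {
		/* LR vm populated on demand via GPU page faults. */
	} else {
		/*
		 * xe_vm_in_preempt_fence_mode(): LR vm relying on preempt
		 * fences and the rebind worker.
		 */
	}
}
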
201 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
202 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
204 int xe_vm_userptr_pin(struct xe_vm *vm);
206 int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
208 int xe_vm_userptr_check_repin(struct xe_vm *vm);
210 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
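
A heavily simplified sketch of the revalidation pattern the userptr and rebind helpers suggest: re-pin userptrs, rebind invalidated mappings, then use the repin check to decide whether the sequence must be retried. Locking, fencing and the compute-exec-queue helpers are deliberately left out; treat the control flow as an assumption, not the driver's actual exec path:

/* Sketch (simplified): revalidate userptr mappings before submission. */
static int example_revalidate_userptrs(struct xe_vm *vm)
{
	int err;

	err = xe_vm_userptr_pin(vm);		/* re-pin invalidated userptrs */
	if (err)
		return err;

	err = xe_vm_rebind(vm, false);		/* not called from the rebind worker */
	if (err)
		return err;

	/* Non-zero means userptrs were invalidated again; caller should retry. */
	return xe_vm_userptr_check_repin(vm);
}
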
214 static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
216 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
217 queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
223 * @vm: The vm.
225 * If the rebind functionality on a compute vm was disabled because there was nothing to execute, reactivate it and run the rebind worker.
227 * This function should be called after submitting a batch to a compute vm.
229 static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
231 if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
232 vm->preempt.rebind_deactivated = false;
233 xe_vm_queue_rebind_worker(vm);
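
Following the doc comment above, a minimal sketch of where xe_vm_reactivate_rebind() belongs: immediately after a batch has been submitted to a compute (preempt-fence mode) vm. The surrounding submission step is only indicated by a comment:

/* Sketch: kick the rebind worker back to life after a submission. */
static void example_after_submit(struct xe_vm *vm)
{
	/* ... a batch has just been submitted to this compute vm ... */

	/* No-op unless rebind was deactivated for lack of work. */
	xe_vm_reactivate_rebind(vm);
}
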
243 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
247 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
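
A sketch of dumping a vm's state through a drm_printer; using drm_info_printer(), GT id 0 and the vm->xe->drm.dev device pointer are assumptions about the calling context (with <drm/drm_print.h> assumed to be included):

/* Sketch: dump the vm's state for GT 0 into the kernel log. */
static void example_dump_vm(struct xe_vm *vm)
{
	struct drm_printer p = drm_info_printer(vm->xe->drm.dev);

	xe_analyze_vm(&p, vm, 0 /* gt_id */);
}
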
251 * xe_vm_resv() - Returns the vm's reservation object
252 * @vm: The vm
254 * Return: Pointer to the vm's reservation object.
256 static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
258 return drm_gpuvm_resv(&vm->gpuvm);
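
A sketch of working with the shared reservation object directly, here attaching a bookkeeping fence; reserving a fence slot first and the BOOKKEEP usage are choices of this example, not requirements stated by the header:

/* Sketch: attach a fence to the vm's shared reservation object. */
static int example_add_vm_fence(struct xe_vm *vm, struct dma_fence *fence)
{
	struct dma_resv *resv = xe_vm_resv(vm);
	int err;

	err = xe_vm_lock(vm, false);	/* locks xe_vm_resv(vm) */
	if (err)
		return err;

	err = dma_resv_reserve_fences(resv, 1);
	if (!err)
		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);

	xe_vm_unlock(vm);
	return err;
}
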
262 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
263 * @vm: The vm
265 #define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
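
The assert macro is meant to be dropped into helpers whose callers must already hold the vm's reservation lock, as in this sketch:

/* Sketch: document and enforce a locking requirement with the assert. */
static void example_touch_protected_state(struct xe_vm *vm)
{
	xe_vm_assert_held(vm);	/* caller must hold xe_vm_resv(vm) */

	/* ... modify state protected by the vm's reservation object ... */
}
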
277 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);