Lines matching defs:err (err declarations and their uses in the Xe VM code)

181 int err;
186 err = xe_bo_lock(bo, true);
187 if (err)
188 return err;
190 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
191 if (err)
203 return err;
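
The first cluster is the preempt-fence setup path and shows this file's dominant idiom: call, test err, return err. A minimal sketch of the lock-then-reserve step, assuming xe_bo_unlock() as the release call and leaving the fence install itself elided:

    /* Sketch: take the BO reservation, reserve fence slots, propagate err. */
    static int reserve_preempt_slots(struct xe_bo *bo, unsigned int num_fences)
    {
            int err;

            err = xe_bo_lock(bo, true);     /* interruptible; -EINTR on signal */
            if (err)
                    return err;

            err = dma_resv_reserve_fences(bo->ttm.base.resv, num_fences);
            /* ... on success the caller would install the fences here ... */
            xe_bo_unlock(bo);

            return err;                     /* 0, or -ENOMEM from the reserve */
    }
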
231 int err;
237 err = drm_gpuvm_exec_lock(&vm_exec);
238 if (err)
244 err = -ENOMEM;
273 return err;
338 * @err: The error returned from ttm_bo_validate().
352 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
356 if (err != -ENOMEM)
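
xe_vm_validate_should_retry() converts a transient -ENOMEM from ttm_bo_validate() into a bounded retry. A sketch of its shape, with the one-second budget and the 20 ms backoff stated as assumptions rather than the exact upstream constants:

    /* Sketch: retry validation on -ENOMEM until a deadline passes. */
    static bool validate_should_retry_sketch(int err, ktime_t *end)
    {
            ktime_t cur;

            if (err != -ENOMEM)             /* only OOM counts as transient */
                    return false;

            cur = ktime_get();
            if (!*end)
                    *end = ktime_add_ms(cur, 1000); /* assumed retry budget */
            if (ktime_after(cur, *end))
                    return false;           /* budget spent: report -ENOMEM */

            msleep(20);                     /* assumed backoff */
            return true;
    }
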
431 int err;
433 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
434 if (err)
435 return err;
448 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
449 if (err)
450 return err;
452 err = wait_for_existing_preempt_fences(vm);
453 if (err)
454 return err;
472 int err = 0;
489 err = xe_vm_userptr_pin(vm);
490 if (err)
499 err = xe_preempt_work_begin(&exec, vm, &done);
501 if (err || done) {
503 if (err && xe_vm_validate_should_retry(&exec, err, &end))
504 err = -EAGAIN;
510 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
511 if (err)
514 err = xe_vm_rebind(vm, true);
515 if (err)
523 err = -ETIME;
535 err = -EAGAIN;
553 if (err == -EAGAIN) {
558 if (err) {
559 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
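
The preempt rebind worker wraps all of the above in a retry loop: -EAGAIN restarts the whole pin/lock/validate/rebind sequence, -ETIME marks a fence wait that ran out, and whatever survives to the end can only be logged with drm_warn(), since a worker has no caller to hand err back to. A condensed sketch, with pin_validate_and_rebind() standing in for the real sequence:

    /* Sketch: worker-style retry; errors are logged, not returned. */
    static int pin_validate_and_rebind(struct xe_vm *vm);   /* placeholder */

    static void rebind_worker_sketch(struct xe_vm *vm)
    {
            int err;

    retry:
            err = pin_validate_and_rebind(vm);
            if (err == -EAGAIN)             /* raced with an eviction */
                    goto retry;
            if (err)
                    drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
    }
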
579 long err;
626 err = dma_resv_wait_timeout(xe_vm_resv(vm),
629 XE_WARN_ON(err <= 0);
632 err = xe_vm_invalidate_vma(vma);
633 XE_WARN_ON(err);
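
dma_resv_wait_timeout() is the one call here that does not use the 0-or-negative-errno convention, which is why this err is declared long: the return value is the remaining timeout in jiffies on success, 0 on timeout, or a negative errno, so XE_WARN_ON(err <= 0) catches both failure and expiry. A sketch, with the usage flag as an assumption:

    /* Sketch: dma_resv_wait_timeout() returns a long, not 0/-errno. */
    static void wait_vm_fences_sketch(struct xe_vm *vm)
    {
            long timeout;

            timeout = dma_resv_wait_timeout(xe_vm_resv(vm),
                                            DMA_RESV_USAGE_BOOKKEEP, /* assumed */
                                            false, MAX_SCHEDULE_TIMEOUT);
            /* > 0: jiffies remaining; 0: timed out; < 0: errno. */
            XE_WARN_ON(timeout <= 0);
    }
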
648 int err = 0;
667 err = xe_vma_userptr_pin_pages(uvma);
668 if (err == -EFAULT) {
677 err = xe_vm_invalidate_vma(&uvma->vma);
679 if (err)
680 return err;
682 if (err < 0)
683 return err;
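
In the userptr pin pass, -EFAULT is deliberately not fatal: it means the user unmapped the range, so the GPU mapping is invalidated and the pass continues; any other negative value still propagates. A sketch of one iteration, using only helpers that appear in the matches above:

    /* Sketch: one userptr pin attempt with the -EFAULT special case. */
    static int pin_one_userptr(struct xe_userptr_vma *uvma)
    {
            int err = xe_vma_userptr_pin_pages(uvma);

            if (err == -EFAULT)             /* range gone from the mm */
                    return xe_vm_invalidate_vma(&uvma->vma);

            return err < 0 ? err : 0;
    }
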
833 int err;
839 err = mmu_interval_notifier_insert(&userptr->notifier,
843 if (err) {
845 return ERR_PTR(err);
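
This is the first of several pointer-returning paths: the int from mmu_interval_notifier_insert() is folded into the return value with ERR_PTR(), and callers unpack it with IS_ERR()/PTR_ERR(). A generic sketch of the <linux/err.h> convention, with widget as a placeholder type:

    #include <linux/err.h>

    struct widget;                          /* placeholder type */
    struct widget *widget_create(void);     /* returns ptr or ERR_PTR(-errno) */

    static int widget_use(void)
    {
            struct widget *w = widget_create();

            if (IS_ERR(w))
                    return PTR_ERR(w);      /* recover the negative errno */
            /* ... use w ... */
            return 0;
    }
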
953 int err;
957 err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
958 if (!err && bo && !bo->vm)
959 err = drm_exec_lock_obj(exec, &bo->ttm.base);
961 return err;
967 int err;
971 err = xe_vm_lock_vma(&exec, vma);
973 if (XE_WARN_ON(err))
1001 int err;
1007 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1009 XE_WARN_ON(err); /* Shouldn't be possible */
1011 return err;
1231 int err, number_tiles = 0;
1268 err = -ENOMEM;
1277 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1278 if (err)
1291 err = PTR_ERR(vm->pt_root[id]);
1302 err = xe_vm_create_scratch(xe, tile, vm);
1303 if (err)
1340 err = PTR_ERR(q);
1366 return ERR_PTR(err);
1375 return ERR_PTR(err);
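
xe_vm_create() mixes both conventions: internal steps report int errnos, sub-objects report ERR_PTR (hence the PTR_ERR() unpacking above), and every failure funnels through unwind labels before the constructor itself returns ERR_PTR(err). A compressed sketch of that shape, with the step helpers as placeholders:

    /* Sketch: goto-based unwind in an allocating constructor. */
    static int init_step_one(struct xe_vm *vm);     /* placeholders */
    static int init_step_two(struct xe_vm *vm);
    static void undo_step_one(struct xe_vm *vm);

    static struct xe_vm *vm_create_sketch(void)
    {
            struct xe_vm *vm;
            int err;

            vm = kzalloc(sizeof(*vm), GFP_KERNEL);
            if (!vm)
                    return ERR_PTR(-ENOMEM);

            err = init_step_one(vm);
            if (err)
                    goto err_free;

            err = init_step_two(vm);
            if (err)
                    goto err_undo_one;

            return vm;

    err_undo_one:
            undo_step_one(vm);
    err_free:
            kfree(vm);
            return ERR_PTR(err);
    }
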
1555 int err;
1585 err = PTR_ERR(fence);
1604 err = -ENOMEM;
1625 return ERR_PTR(err);
1640 int err;
1661 err = PTR_ERR(fence);
1680 err = -ENOMEM;
1700 return ERR_PTR(err);
1764 int err;
1770 err = xe_bo_validate(bo, vm, true);
1771 if (err)
1772 return err;
1814 int err;
1864 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1866 if (err)
1871 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1875 if (err < 0)
1903 return err;
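
Note the asymmetry between the two ID allocations: xa_alloc() returns 0 or a negative errno, so a plain if (err) suffices, while xa_alloc_cyclic() can also return 1 when its counter wraps, which is not a failure, hence if (err < 0). A sketch:

    #include <linux/xarray.h>

    /* Sketch: xa_alloc() vs. xa_alloc_cyclic() return conventions. */
    static int alloc_ids_sketch(struct xarray *xa, struct xarray *cyc,
                                void *entry)
    {
            u32 id, asid, next = 0;
            int err;

            err = xa_alloc(xa, &id, entry, xa_limit_32b, GFP_KERNEL);
            if (err)                        /* -ENOMEM or -EBUSY only */
                    return err;

            err = xa_alloc_cyclic(cyc, &asid, entry, xa_limit_32b, &next,
                                  GFP_KERNEL);
            if (err < 0)                    /* 1 only means the ID wrapped */
                    return err;

            return 0;
    }
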
1913 int err = 0;
1922 err = -ENOENT;
1924 err = -EBUSY;
1929 if (!err)
1932 return err;
1947 int err;
1952 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
1953 if (err)
1954 return err;
2052 int err;
2076 err = xe_bo_lock(bo, true);
2077 if (err)
2078 return ERR_PTR(err);
2124 int err;
2131 err = 0;
2133 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2136 if (!err) {
2137 err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2140 if (err) {
2142 return ERR_PTR(err);
2153 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2154 if (err) {
2157 return ERR_PTR(err);
2160 err = add_preempt_fences(vm, bo);
2161 if (err) {
2164 return ERR_PTR(err);
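
This cluster takes the VM object and an external BO under one drm_exec transaction before pinning pages and adding preempt fences, converting any failure to ERR_PTR after tearing the transaction down. A sketch of the locking half, assuming the older two-argument drm_exec_init() (newer kernels add a third nr argument):

    #include <drm/drm_exec.h>

    /* Sketch: lock two GEM objects, retrying the block on contention. */
    static int lock_vm_and_bo(struct drm_exec *exec, struct xe_vm *vm,
                              struct xe_bo *bo)
    {
            int err = 0;

            drm_exec_init(exec, 0);         /* assumed two-arg form */
            drm_exec_until_all_locked(exec) {
                    err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
                    if (!err && bo)
                            err = drm_exec_lock_obj(exec, &bo->ttm.base);
                    drm_exec_retry_on_contention(exec);
            }
            if (err)
                    drm_exec_fini(exec);    /* caller keeps the locks on success */
            return err;
    }
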
2205 int err = 0;
2211 err |= xe_vm_insert_vma(vm, op->map.vma);
2212 if (!err)
2225 err |= xe_vm_insert_vma(vm, op->remap.prev);
2226 if (!err)
2228 if (!err && op->remap.skip_prev) {
2235 err |= xe_vm_insert_vma(vm, op->remap.next);
2236 if (!err)
2238 if (!err && op->remap.skip_next) {
2246 if (!err) {
2263 return err;
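
The commit step uses err |= xe_vm_insert_vma(...) to keep attempting the remaining inserts while remembering that one failed. OR-ing two different errnos does not yield a meaningful code, so the pattern is only safe when callers test zero versus nonzero, and the XE_WARN_ON above documents that failure is not actually expected here. A sketch of the accumulate-and-check shape, with the steps as placeholders:

    /* Sketch: accumulate failures across steps that must all run. */
    static int step_a(void), step_b(void);  /* placeholders: 0 or -errno */

    static int commit_all_sketch(void)
    {
            int err = 0;

            err |= step_a();
            err |= step_b();                /* still runs if step_a() failed */

            return err;     /* only the zero/nonzero distinction is meaningful */
    }
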
2275 int err = 0;
2406 err = xe_vma_op_commit(vm, op);
2407 if (err)
2408 return err;
2430 int err;
2434 err = xe_vm_lock_vma(exec, vma);
2435 if (err)
2436 return err;
2443 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2457 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2462 if (err)
2469 err = xe_vm_bind(vm, op->remap.prev, op->q,
2474 if (err)
2481 err = xe_vm_bind(vm, op->remap.next, op->q,
2487 if (err)
2495 err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2500 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2509 if (err)
2512 return err;
2519 int err;
2524 err = op_execute(&exec, vm, vma, op);
2526 if (err)
2531 if (err == -EAGAIN) {
2544 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2545 if (!err)
2552 return err;
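
__xe_vma_op_execute() retries on -EAGAIN: the op raced with an eviction or a userptr invalidation while locks were dropped, so for userptr VMAs the pages are re-pinned before looping back. A condensed sketch, with do_op_once() standing in for the locked op_execute() call:

    /* Sketch: retry an op that raced with invalidation. */
    static int do_op_once(struct xe_vm *vm, struct xe_vma *vma); /* placeholder */

    static int op_execute_with_retry(struct xe_vm *vm, struct xe_vma *vma)
    {
            int err;

    retry_userptr:
            err = do_op_once(vm, vma);
            if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
                    err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
                    if (!err)
                            goto retry_userptr;
            }
            return err;
    }
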
2699 int err;
2704 err = xe_vma_op_execute(vm, op);
2705 if (err) {
2707 op->base.op, err);
2732 int err;
2752 err = __copy_from_user(*bind_ops, bind_user,
2755 if (XE_IOCTL_DBG(xe, err)) {
2756 err = -EFAULT;
2776 err = -EINVAL;
2784 err = -EINVAL;
2789 err = -EINVAL;
2820 err = -EINVAL;
2829 err = -EINVAL;
2839 return err;
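
__copy_from_user() follows yet another convention: it returns the number of bytes it failed to copy rather than an errno, so any nonzero result is translated to -EFAULT by hand (XE_IOCTL_DBG() only logs the failing condition). A sketch using the checked copy_from_user() variant:

    #include <linux/uaccess.h>

    /* Sketch: translating copy_from_user()'s byte count into -EFAULT. */
    static int fetch_args_sketch(void *dst, const void __user *src, size_t size)
    {
            if (copy_from_user(dst, src, size))     /* bytes NOT copied */
                    return -EFAULT;
            return 0;
    }
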
2848 int i, err = 0;
2862 return err;
2879 int err;
2882 err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2883 if (err)
2884 return err;
2889 err = -ENOENT;
2894 err = -EINVAL;
2901 err = -EINVAL;
2905 err = down_write_killable(&vm->lock);
2906 if (err)
2910 err = -ENOENT;
2920 err = -EINVAL;
2929 err = -ENOMEM;
2936 err = -ENOMEM;
2955 err = -ENOENT;
2963 err = -EINVAL;
2972 err = -EINVAL;
2981 err = -EINVAL;
2991 err = -EINVAL;
2999 err = -ENOMEM;
3006 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3012 if (err)
3020 err = -EINVAL;
3025 err = -ENODATA;
3042 err = PTR_ERR(ops[i]);
3047 err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3050 if (err)
3056 err = -ENODATA;
3064 err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3080 return err;
3085 if (err == -ENODATA)
3086 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3106 return err;
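
The ioctl body strings all of these conventions together, and its tail has one quirk worth noting: -ENODATA is an internal sentinel meaning the bind produced no operations, in which case the user's sync entries are still signalled and the sentinel never reaches userspace. A sketch of the outer shape, with the middle pipeline as a placeholder:

    /* Sketch: ioctl unwind with an internal -ENODATA sentinel. */
    static int parse_and_execute(struct xe_vm *vm);         /* placeholder */
    static int signal_user_syncs(struct xe_vm *vm);         /* placeholder */

    static int bind_ioctl_sketch(struct xe_vm *vm)
    {
            int err;

            err = down_write_killable(&vm->lock);   /* -EINTR on fatal signal */
            if (err)
                    return err;

            err = parse_and_execute(vm);
            up_write(&vm->lock);

            if (err == -ENODATA)            /* no ops: not a user-visible error */
                    err = signal_user_syncs(vm);
            return err;
    }
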
3330 int err;
3343 err = ttm_bo_vmap(&bo->ttm, &src);
3344 if (!err) {
3357 err = 0;
3359 err = -EFAULT;
3366 if (err) {
3368 snap->snap[i].data = ERR_PTR(err);
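
The snapshot capture ends with a twist on the ERR_PTR convention: instead of returning err, it stores ERR_PTR(err) in the per-object snap[i].data slot, letting the later print/free pass distinguish missing data from a capture failure without a separate status field. A sketch, with snap_slot as a stand-in for the real snapshot entry:

    #include <linux/err.h>

    struct snap_slot { void *data; };       /* stand-in for the snapshot entry */

    /* Sketch: park an errno in a pointer slot for later inspection. */
    static void capture_slot(struct snap_slot *slot, int err, void *payload)
    {
            slot->data = err ? ERR_PTR(err) : payload;
    }

    static void print_slot(const struct snap_slot *slot)
    {
            if (IS_ERR(slot->data))
                    pr_warn("capture failed: %pe\n", slot->data);
    }
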