Lines matching refs: ret

176 int ret = 0;
224 ret = -ENOMEM;
244 return ret;
317 int ret;
320 ret = amdgpu_bo_reserve(mem->bo, false);
321 if (ret)
322 return ret;
328 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
334 if (ret) {
335 pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
341 return ret;
378 int ret;
400 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
404 return ret;
411 int ret;
419 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
420 if (ret)
426 return ret;
433 int ret = amdgpu_bo_reserve(bo, false);
435 if (ret)
436 return ret;
438 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
439 if (ret)
442 ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
443 if (ret)
452 return ret;
472 int ret;
474 ret = amdgpu_vm_validate(adev, vm, ticket,
476 if (ret) {
478 return ret;
490 int ret;
492 ret = amdgpu_vm_update_pdes(adev, vm, false);
493 if (ret)
494 return ret;
556 int ret;
566 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
570 if (unlikely(ret))
573 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
574 if (unlikely(ret))
578 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
579 if (ret)
587 pr_err("DMA map userptr failed: %d\n", ret);
592 return ret;
600 int ret;
603 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
604 if (ret)
605 return ret;
648 int ret;
664 ret = dma_mapping_error(adev->dev, dma_addr);
665 if (unlikely(ret))
666 return ret;
671 ret = -ENOMEM;
676 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
677 if (unlikely(ret))
680 return ret;
689 return ret;
838 int ret;
840 ret = kfd_mem_export_dmabuf(mem);
841 if (ret)
842 return ret;
877 int i, ret;
905 ret = -ENOMEM;
931 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
932 if (ret)
940 ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
941 if (ret)
947 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
948 if (ret)
953 ret = -EINVAL;
958 ret = amdgpu_bo_reserve(bo[i], false);
959 if (ret) {
971 ret = -ENOMEM;
972 pr_err("Failed to add BO object to VM. ret == %d\n",
973 ret);
1001 return ret;
1057 int ret = 0;
1061 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
1062 if (ret) {
1063 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
1067 ret = amdgpu_hmm_register(bo, user_addr);
1068 if (ret) {
1070 __func__, ret);
1088 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
1089 if (ret) {
1090 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
1094 ret = amdgpu_bo_reserve(bo, true);
1095 if (ret) {
1100 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1101 if (ret)
1108 if (ret)
1112 return ret;
1146 int ret;
1154 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1156 if (unlikely(ret))
1159 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1161 if (unlikely(ret))
1169 return ret;
1188 int ret;
1200 ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
1203 if (unlikely(ret))
1208 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1210 if (unlikely(ret))
1218 return ret;
1234 int ret = 0;
1237 ret = amdgpu_sync_wait(ctx->sync, intr);
1241 return ret;
1265 int ret;
1267 ret = kfd_mem_dmamap_attachment(mem, entry);
1268 if (ret)
1269 return ret;
1272 ret = amdgpu_vm_bo_update(adev, bo_va, false);
1273 if (ret) {
1275 return ret;
1286 int ret;
1289 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1292 if (ret) {
1293 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1294 entry->va, ret);
1295 return ret;
1301 ret = update_gpuvm_pte(mem, entry, sync);
1302 if (ret) {
1312 return ret;
1319 int ret;
1323 ret = vm_validate_pt_pd_bos(peer_vm, ticket);
1324 if (ret)
1325 return ret;
1335 int ret;
1341 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1344 if (ret)
1345 return ret;
1355 int ret;
1359 ret = vm_update_pds(peer_vm, sync);
1360 if (ret)
1361 return ret;
1371 int ret;
1391 ret = -ENOMEM;
1405 ret = amdgpu_bo_reserve(vm->root.bo, true);
1406 if (ret)
1408 ret = vm_validate_pt_pd_bos(vm, NULL);
1409 if (ret) {
1413 ret = amdgpu_bo_sync_wait(vm->root.bo,
1415 if (ret)
1417 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1418 if (ret)
1451 return ret;
1467 int ret = 0;
1469 ret = amdgpu_bo_reserve(bo, false);
1470 if (unlikely(ret))
1471 return ret;
1473 ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1474 if (ret)
1480 return ret;
1493 int ret = 0;
1495 ret = amdgpu_bo_reserve(bo, false);
1496 if (unlikely(ret))
1507 int ret;
1517 ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1518 if (ret)
1519 return ret;
1529 int ret;
1536 ret = amdgpu_vm_make_compute(adev, avm);
1537 if (ret)
1538 return ret;
1541 ret = init_kfd_vm(avm, process_info, ef);
1542 if (ret)
1543 return ret;
1624 int ret = 0;
1633 ret = -EINVAL;
1642 return ret;
1699 int ret;
1752 ret = -ENOMEM;
1771 ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
1773 if (ret) {
1782 ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
1784 if (ret) {
1785 pr_debug("Failed to create BO on domain %s. ret %d\n",
1786 domain_string(alloc_domain), ret);
1789 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1790 if (ret) {
1791 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1794 ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
1795 if (ret)
1816 ret = init_user_pages(*mem, user_addr, criu_resume);
1817 if (ret)
1821 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1822 if (ret) {
1832 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
1835 if (ret)
1868 return ret;
1881 int ret;
1919 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1920 if (unlikely(ret))
1921 return ret;
1934 ret = unreserve_bo_and_vms(&ctx, false, false);
1983 return ret;
1991 int ret;
2032 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
2033 if (ret)
2037 ret = reserve_bo_and_vm(mem, avm, &ctx);
2038 if (unlikely(ret))
2050 ret = vm_validate_pt_pd_bos(avm, NULL);
2051 if (unlikely(ret))
2061 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
2063 if (ret) {
2068 ret = vm_update_pds(avm, ctx.sync);
2069 if (ret) {
2080 ret = unreserve_bo_and_vms(&ctx, false, false);
2089 return ret;
2096 int ret;
2102 ret = amdgpu_bo_reserve(mem->bo, true);
2103 if (ret)
2120 return ret;
2130 int ret;
2134 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2135 if (unlikely(ret))
2139 ret = -EINVAL;
2143 ret = vm_validate_pt_pd_bos(avm, NULL);
2144 if (unlikely(ret))
2171 return ret;
2178 int ret;
2186 ret = amdgpu_sync_wait(&sync, intr);
2188 return ret;
2200 int ret;
2202 ret = amdgpu_bo_reserve(bo, true);
2203 if (ret) {
2204 pr_err("Failed to reserve bo. ret %d\n", ret);
2208 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2209 if (ret) {
2210 pr_err("Failed to pin bo. ret %d\n", ret);
2214 ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2215 if (ret) {
2216 pr_err("Failed to bind bo to GART. ret %d\n", ret);
2235 return ret;
2254 int ret;
2264 ret = amdgpu_bo_reserve(bo, true);
2265 if (ret) {
2266 pr_err("Failed to reserve bo. ret %d\n", ret);
2270 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2271 if (ret) {
2272 pr_err("Failed to pin bo. ret %d\n", ret);
2276 ret = amdgpu_bo_kmap(bo, kptr);
2277 if (ret) {
2278 pr_err("Failed to map bo to kernel. ret %d\n", ret);
2300 return ret;
2341 int ret;
2353 ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2354 if (ret)
2388 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
2391 if (ret)
2401 return ret;
2411 int ret;
2413 ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
2415 if (ret)
2416 return ret;
2419 ret = -EINVAL;
2423 ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
2425 if (ret)
2436 return ret;
2442 int ret;
2445 ret = kfd_mem_export_dmabuf(mem);
2446 if (ret)
2453 return ret;
2510 int ret = 0;
2549 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2551 if (ret) {
2559 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
2561 if (ret) {
2562 pr_debug("Failed %d to get user pages\n", ret);
2571 if (ret != -EFAULT)
2572 return ret;
2574 ret = 0;
2583 ret = -EAGAIN;
2594 return ret;
2611 int ret;
2621 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2623 if (unlikely(ret))
2633 ret = drm_exec_prepare_obj(&exec, gobj, 1);
2635 if (unlikely(ret))
2640 ret = process_validate_vms(process_info, NULL);
2641 if (ret)
2655 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2656 if (ret) {
2673 ret = update_gpuvm_pte(mem, attachment, &sync);
2674 if (ret) {
2686 ret = process_update_pds(process_info, &sync);
2693 return ret;
2703 int ret = 0;
2721 ret = -EAGAIN;
2727 ret = -EAGAIN;
2735 return ret;
2867 int ret;
2877 ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2879 if (unlikely(ret)) {
2880 pr_err("Locking VM PD failed, ret: %d\n", ret);
2893 ret = drm_exec_prepare_obj(&exec, gobj, 1);
2895 if (unlikely(ret)) {
2896 pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
2915 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2916 if (ret) {
2919 ret = amdgpu_amdkfd_bo_validate(bo,
2921 if (ret) {
2928 ret = amdgpu_sync_fence(&sync_obj, fence);
2929 if (ret) {
2942 ret = process_validate_vms(process_info, &exec.ticket);
2943 if (ret) {
2944 pr_debug("Validating VMs failed, ret: %d\n", ret);
2961 ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2962 if (ret) {
2975 ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
2976 if (ret) {
2983 ret = process_update_pds(process_info, &sync_obj);
2984 if (ret) {
2992 ret = process_sync_pds_resv(process_info, &sync_obj);
2993 if (ret) {
3020 ret = -ENOMEM;
3055 return ret;
3062 int ret;
3082 ret = amdgpu_bo_reserve(gws_bo, false);
3083 if (unlikely(ret)) {
3084 pr_err("Reserve gws bo failed %d\n", ret);
3088 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
3089 if (ret) {
3090 pr_err("GWS BO validate failed %d\n", ret);
3097 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
3098 if (ret)
3106 return ret;
3119 return ret;
3124 int ret;
3134 ret = amdgpu_bo_reserve(gws_bo, false);
3135 if (unlikely(ret)) {
3136 pr_err("Reserve gws bo failed %d\n", ret);
3138 return ret;
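
Most of these matches follow one recurring shape, visible for example around lines 433-452 and again around 3082-3106: reserve the BO, validate it into the requested domain, reserve a fence slot on its reservation object, then unreserve and propagate ret. Below is a minimal sketch of that flow, assuming the usual amdgpu helpers that appear in the matches above (amdgpu_bo_reserve, amdgpu_amdkfd_bo_validate, dma_resv_reserve_fences) plus amdgpu_bo_unreserve; the wrapper name and the error label are illustrative and not taken from the source file.

	/* Illustrative sketch of the reserve/validate/fence error path; not the
	 * file's actual function body.
	 */
	static int bo_validate_and_fence_sketch(struct amdgpu_bo *bo, uint32_t domain)
	{
		/* Reserve the BO so its placement and reservation object can be changed. */
		int ret = amdgpu_bo_reserve(bo, false);

		if (ret)
			return ret;

		/* Move/validate the BO into the requested memory domain. */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret)
			goto out_unreserve;

		/* Make room for one more fence slot (e.g. an eviction fence). */
		ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
		if (ret)
			goto out_unreserve;

		/* ... the real code would attach the fence here ... */

	out_unreserve:
		amdgpu_bo_unreserve(bo);
		return ret;
	}

The same reserve-then-cleanup shape shows up in the pin/GART/kmap matches around lines 2200-2300, where each failing step prints ret with pr_err before the function unwinds and returns it.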