Lines Matching defs:hdev

19 struct hl_device *hdev = ctx->hdev;
20 struct asic_fixed_properties *prop = &hdev->asic_prop;
24 if (!hdev->supports_cb_mapping) {
25 dev_err_ratelimited(hdev->dev,
37 dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
41 mutex_lock(&hdev->mmu_lock);
45 dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
49 rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
53 mutex_unlock(&hdev->mmu_lock);
62 mutex_unlock(&hdev->mmu_lock);
70 struct hl_device *hdev = ctx->hdev;
72 mutex_lock(&hdev->mmu_lock);
74 hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
75 mutex_unlock(&hdev->mmu_lock);
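The entries for lines 19-75 come from the helpers that map and unmap a CB through the device MMU. Below is a minimal sketch of the lock/map/invalidate pattern those lines suggest, assuming the driver's habanalabs.h definitions; hl_mmu_map_contiguous()/hl_mmu_unmap_contiguous(), the error message text at line 25, and the single-call error handling are assumptions, not copied from the listing.

    /*
     * Sketch of the map/unmap-under-mmu_lock pattern suggested by lines 19-75.
     * Only the fields, locks and calls that appear in the listing are taken
     * from it; the rest is filled in as a simplified assumption.
     */
    static int cb_map_sketch(struct hl_ctx *ctx, struct hl_cb *cb)
    {
        struct hl_device *hdev = ctx->hdev;
        int rc;

        if (!hdev->supports_cb_mapping) {
            /* message is a placeholder; line 25 only shows the call site */
            dev_err_ratelimited(hdev->dev, "CB mapping is not supported\n");
            return -EINVAL;
        }

        /* cb->virtual_addr is assumed to come from the per-context CB VA
         * pool (see the "Failed to allocate device virtual address for CB"
         * path at line 37).
         */
        mutex_lock(&hdev->mmu_lock);

        rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->size);
        if (rc) {
            dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
            goto unlock;
        }

        /* Flush the device MMU cache so the new translation is visible. */
        rc = hl_mmu_invalidate_cache(hdev, false,
                                     MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);

    unlock:
        mutex_unlock(&hdev->mmu_lock);
        return rc;
    }

    static void cb_unmap_sketch(struct hl_ctx *ctx, struct hl_cb *cb)
    {
        struct hl_device *hdev = ctx->hdev;

        mutex_lock(&hdev->mmu_lock);
        hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->size);
        hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
        mutex_unlock(&hdev->mmu_lock);
    }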
80 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
83 gen_pool_free(hdev->internal_cb_pool,
86 hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);
91 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
95 spin_lock(&hdev->cb_pool_lock);
96 list_add(&cb->pool_list, &hdev->cb_pool);
97 spin_unlock(&hdev->cb_pool_lock);
99 cb_fini(hdev, cb);
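Lines 80-99 split CB tear-down into two cases: internal CBs return to the gen_pool, other CBs free their DMA coherent buffer, and pool CBs are recycled onto hdev->cb_pool instead of being freed. A short reconstruction from the listed lines follows; cb->is_internal, cb->is_pool and the final kfree() are assumed details not visible in the listing.

    /* Reconstruction of lines 80-99; flag names are assumptions. */
    static void cb_fini_sketch(struct hl_device *hdev, struct hl_cb *cb)
    {
        if (cb->is_internal)
            gen_pool_free(hdev->internal_cb_pool,
                          (uintptr_t) cb->kernel_address, cb->size);
        else
            hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address,
                                      cb->bus_address);

        kfree(cb);
    }

    static void cb_do_release_sketch(struct hl_device *hdev, struct hl_cb *cb)
    {
        if (cb->is_pool) {
            /* Pool CBs are recycled rather than freed. */
            spin_lock(&hdev->cb_pool_lock);
            list_add(&cb->pool_list, &hdev->cb_pool);
            spin_unlock(&hdev->cb_pool_lock);
        } else {
            cb_fini_sketch(hdev, cb);
        }
    }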
103 static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
118 if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
128 p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
134 cb_offset = p - hdev->internal_cb_pool_virt_addr;
136 cb->bus_address = hdev->internal_cb_va_base + cb_offset;
138 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
140 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
142 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
147 dev_err(hdev->dev,
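Lines 103-147 show hl_cb_alloc() picking one of two backing stores: internal CBs come out of hdev->internal_cb_pool (a gen_pool over pre-mapped memory), everything else comes from DMA coherent memory, with GFP_ATOMIC tried first for the kernel context. The sketch below is a guess at that intent; cb_alloc_backing_sketch() is not a real driver function and the exact ordering of the checks is not visible in the listing.

    /* Sketch of the two allocation paths in lines 118-147. */
    static void *cb_alloc_backing_sketch(struct hl_device *hdev, u32 cb_size,
                                         u64 ctx_id, bool internal_cb,
                                         dma_addr_t *bus_addr)
    {
        void *p;

        if (internal_cb) {
            /* Internal CBs live in a pre-mapped pool; the device address is
             * derived from the offset inside that pool.
             */
            p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
            if (p)
                *bus_addr = hdev->internal_cb_va_base +
                            (p - hdev->internal_cb_pool_virt_addr);
        } else if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled) {
            /* Kernel-context allocations may run in atomic context. */
            p = hl_asic_dma_alloc_coherent(hdev, cb_size, bus_addr, GFP_ATOMIC);
            if (!p)
                p = hl_asic_dma_alloc_coherent(hdev, cb_size, bus_addr,
                                               GFP_KERNEL);
        } else {
            p = hl_asic_dma_alloc_coherent(hdev, cb_size, bus_addr, GFP_KERNEL);
        }

        if (!p)
            dev_err(hdev->dev,
                    "failed to allocate %d bytes of memory for CB\n", cb_size);

        return p;
    }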
161 struct hl_device *hdev;
179 cb_do_release(cb->hdev, cb);
195 cb_args->cb_size <= cb_args->hdev->asic_prop.cb_pool_cb_size) {
197 spin_lock(&cb_args->hdev->cb_pool_lock);
198 if (!list_empty(&cb_args->hdev->cb_pool)) {
199 cb = list_first_entry(&cb_args->hdev->cb_pool,
202 spin_unlock(&cb_args->hdev->cb_pool_lock);
205 spin_unlock(&cb_args->hdev->cb_pool_lock);
206 dev_dbg(cb_args->hdev->dev, "CB pool is empty\n");
212 cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
217 cb->hdev = cb_args->hdev;
227 dev_err(cb_args->hdev->dev,
244 cb_do_release(cb_args->hdev, cb);
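Lines 195-212 implement a "recycle from the CB pool, otherwise allocate" policy under hdev->cb_pool_lock. A compact sketch of that pattern, assuming the hl_cb_alloc() call listed at line 212; cb_get_or_alloc_sketch(), the !internal_cb guard and the list_del() are assumptions:

    /* Sketch of the pool-first allocation in lines 195-212. */
    static struct hl_cb *cb_get_or_alloc_sketch(struct hl_device *hdev,
                                                u32 cb_size, u64 ctx_id,
                                                bool internal_cb)
    {
        struct hl_cb *cb = NULL;

        if (!internal_cb && cb_size <= hdev->asic_prop.cb_pool_cb_size) {
            spin_lock(&hdev->cb_pool_lock);
            if (!list_empty(&hdev->cb_pool)) {
                cb = list_first_entry(&hdev->cb_pool, struct hl_cb, pool_list);
                list_del(&cb->pool_list);
            } else {
                dev_dbg(hdev->dev, "CB pool is empty\n");
            }
            spin_unlock(&hdev->cb_pool_lock);
        }

        if (!cb)
            cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);

        return cb;
    }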
254 return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
266 int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
271 .hdev = hdev,
280 if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
281 dev_warn_ratelimited(hdev->dev,
287 dev_err(hdev->dev, "CB size %d must be less than %d\n",
367 struct hl_device *hdev = hpriv->hdev;
374 if (!hl_device_operational(hdev, &status)) {
375 dev_dbg_ratelimited(hdev->dev,
377 hdev->status[status]);
384 dev_err(hdev->dev,
389 rc = hl_cb_create(hdev, &hpriv->mem_mgr, hpriv->ctx,
444 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
451 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, cb_size,
454 dev_err(hdev->dev,
459 cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
462 dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
470 hl_cb_destroy(&hdev->kernel_mem_mgr, cb_handle);
475 int hl_cb_pool_init(struct hl_device *hdev)
480 INIT_LIST_HEAD(&hdev->cb_pool);
481 spin_lock_init(&hdev->cb_pool_lock);
483 for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
484 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
488 list_add(&cb->pool_list, &hdev->cb_pool);
490 hl_cb_pool_fini(hdev);
498 int hl_cb_pool_fini(struct hl_device *hdev)
502 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
504 cb_fini(hdev, cb);
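Lines 475-504 pre-fill the CB pool at init and drain it at fini. A reconstruction from the listed lines, assuming hl_cb_alloc() and cb_fini() as shown above; the -ENOMEM return and the list_del() in the fini loop are assumptions:

    /* Reconstruction of the pool init/fini pair in lines 475-504. */
    static int cb_pool_init_sketch(struct hl_device *hdev)
    {
        struct hl_cb *cb;
        int i;

        INIT_LIST_HEAD(&hdev->cb_pool);
        spin_lock_init(&hdev->cb_pool_lock);

        for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
            cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
                             HL_KERNEL_ASID_ID, false);
            if (!cb) {
                cb_pool_fini_sketch(hdev);
                return -ENOMEM;
            }
            list_add(&cb->pool_list, &hdev->cb_pool);
        }

        return 0;
    }

    static void cb_pool_fini_sketch(struct hl_device *hdev)
    {
        struct hl_cb *cb, *tmp;

        list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
            list_del(&cb->pool_list);
            cb_fini_sketch(hdev, cb);
        }
    }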
512 struct hl_device *hdev = ctx->hdev;
513 struct asic_fixed_properties *prop = &hdev->asic_prop;
516 if (!hdev->supports_cb_mapping)
521 dev_err(hdev->dev,
526 ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
534 dev_err(hdev->dev,
542 hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
551 struct hl_device *hdev = ctx->hdev;
553 if (!hdev->supports_cb_mapping)
557 hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
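Lines 512-557 reserve a host-side VA block per context for mapped CBs and release it when the context goes away. The sketch below keeps only what the listing shows; the alignment argument (prop->pmmu.page_size) and the omitted gen_pool bookkeeping that normally sits between these calls are assumptions.

    /* Sketch of the per-context CB VA pool setup/teardown in lines 512-557. */
    static int cb_va_pool_init_sketch(struct hl_ctx *ctx)
    {
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;

        if (!hdev->supports_cb_mapping)
            return 0;

        ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx,
                                                   HL_VA_RANGE_TYPE_HOST,
                                                   CB_VA_POOL_SIZE,
                                                   prop->pmmu.page_size);
        if (!ctx->cb_va_pool_base) {
            dev_err(hdev->dev, "Failed to reserve VA block for CB pool\n");
            return -ENOMEM;
        }

        return 0;
    }

    static void cb_va_pool_fini_sketch(struct hl_ctx *ctx)
    {
        struct hl_device *hdev = ctx->hdev;

        if (!hdev->supports_cb_mapping)
            return;

        hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
    }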