Lines matching defs:hdev in the habanalabs MMU code; each matched source line is shown prefixed with its line number in the file.

16  * @hdev: habanalabs device structure.
22 static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
25 return &hdev->mmu_func[pgt_residency];
28 bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
30 struct asic_fixed_properties *prop = &hdev->asic_prop;
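hl_is_dram_va() above decides, from the asic_prop ranges, whether a virtual address belongs to the device-DRAM VA region. A minimal userspace model of that kind of range check (struct mmu_range and va_in_range() are invented here; the driver keeps the real bounds in its dmmu properties):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented miniature of a VA range descriptor. */
struct mmu_range {
	uint64_t start_addr;
	uint64_t end_addr;
};

/* True if [va, va + len) lies fully inside the range. */
static bool va_in_range(const struct mmu_range *r, uint64_t va, uint64_t len)
{
	return va >= r->start_addr && va + len <= r->end_addr;
}

int main(void)
{
	struct mmu_range dram = { 0x20000000ULL, 0x30000000ULL };

	printf("%d\n", va_in_range(&dram, 0x20001000ULL, 0x1000)); /* 1: DRAM VA */
	printf("%d\n", va_in_range(&dram, 0x10000000ULL, 0x1000)); /* 0: host VA */
	return 0;
}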
39 * @hdev: habanalabs device structure.
43 int hl_mmu_init(struct hl_device *hdev)
47 if (hdev->mmu_disable)
50 mutex_init(&hdev->mmu_lock);
52 if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
53 rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
58 if (hdev->mmu_func[MMU_HR_PGT].init != NULL) {
59 rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);
67 if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
68 hdev->mmu_func[MMU_DR_PGT].fini(hdev);
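The hl_mmu_init() fragments above show a two-stage bring-up: the device-resident (DR) page-table hooks are initialized first, then the host-resident (HR) ones, and a failure in the HR step unwinds the DR init. A compilable toy of that unwind shape (dr_init/hr_init/dr_fini are invented stand-ins for the MMU_DR_PGT/MMU_HR_PGT hooks):

#include <stdio.h>

static int dr_init(void)  { puts("dr init");  return 0; }
static void dr_fini(void) { puts("dr fini"); }
static int hr_init(void)  { puts("hr init");  return -1; /* simulate failure */ }

/* Same shape as hl_mmu_init(): init DR, then HR; unwind DR if HR fails. */
static int mmu_init(void)
{
	int rc;

	rc = dr_init();
	if (rc)
		return rc;

	rc = hr_init();
	if (rc)
		goto fini_dr_mmu;

	return 0;

fini_dr_mmu:
	dr_fini();
	return rc;
}

int main(void)
{
	return mmu_init() ? 1 : 0;
}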
75 * @hdev: habanalabs device structure.
83 void hl_mmu_fini(struct hl_device *hdev)
85 if (hdev->mmu_disable)
88 if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
89 hdev->mmu_func[MMU_DR_PGT].fini(hdev);
91 if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
92 hdev->mmu_func[MMU_HR_PGT].fini(hdev);
94 mutex_destroy(&hdev->mmu_lock);
107 struct hl_device *hdev = ctx->hdev;
110 if (hdev->mmu_disable)
113 if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
114 rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
119 if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) {
120 rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);
128 if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
129 hdev->mmu_func[MMU_DR_PGT].fini(hdev);
146 struct hl_device *hdev = ctx->hdev;
148 if (hdev->mmu_disable)
151 if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
152 hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx);
154 if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
155 hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);
161 * @hdev: pointer to device data.
174 int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
186 dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
192 static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
195 struct asic_fixed_properties *prop = &hdev->asic_prop;
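hl_mmu_get_prop() picks the property set that governs a translation. Based on the dmmu/pmmu property names visible elsewhere in this listing, a plausible sketch of that selection (pick_prop() and the struct layouts below are assumptions, not the driver's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmu_props { uint32_t page_size; };

/* Assumed split: one property set per MMU flavour. */
struct fixed_props {
	struct mmu_props dmmu;       /* device-resident (DRAM) MMU */
	struct mmu_props pmmu;       /* host MMU, default page size */
	struct mmu_props pmmu_huge;  /* host MMU, huge pages */
};

static const struct mmu_props *pick_prop(const struct fixed_props *p,
					 uint32_t page_size, bool is_dram_addr)
{
	if (is_dram_addr)
		return &p->dmmu;
	if (p->pmmu_huge.page_size && page_size % p->pmmu_huge.page_size == 0)
		return &p->pmmu_huge;
	return &p->pmmu;
}

int main(void)
{
	struct fixed_props p = { { 1 << 21 }, { 1 << 12 }, { 1 << 21 } };

	printf("%u\n", pick_prop(&p, 1 << 21, false)->page_size); /* huge set */
	return 0;
}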
228 struct hl_device *hdev = ctx->hdev;
236 if (hdev->mmu_disable)
239 is_dram_addr = hl_is_dram_va(hdev, virt_addr);
240 mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
243 mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
245 rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
265 trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte);
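Around the unmap fragments, hl_mmu_unmap_page() converts the caller's page size into the MMU's real page size via mmu_get_real_page_size() and then, presumably, walks the range one real page at a time. The loop below is an assumption about the elided lines, with example numbers only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example: a 64KB user mapping backed by 4KB hardware pages. */
	uint64_t virt_addr = 0x1000000;
	uint32_t page_size = 64 * 1024, real_page_size = 4 * 1024;
	uint32_t npages = page_size / real_page_size;

	for (uint32_t i = 0; i < npages; i++)	/* 16 hardware unmaps */
		printf("unmap va 0x%llx\n",
		       (unsigned long long)(virt_addr + (uint64_t)i * real_page_size));
	return 0;
}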
296 struct hl_device *hdev = ctx->hdev;
304 if (hdev->mmu_disable)
307 is_dram_addr = hl_is_dram_va(hdev, virt_addr);
308 mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);
311 mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
313 rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
324 ((hdev->asic_funcs->scramble_addr(hdev, phys_addr) &
326 (hdev->asic_funcs->scramble_addr(hdev, virt_addr) &
330 dev_crit(hdev->dev,
352 trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte);
360 dev_warn_ratelimited(hdev->dev,
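The map-path fragments check that the scrambled physical and virtual addresses are still aligned to the real page size before programming any PTE, since address scrambling can disturb low bits. A self-contained demo of that check (xor_scramble() is a toy that deliberately touches low bits to show the failure; the real scrambler comes from asic_funcs->scramble_addr):

#include <stdint.h>
#include <stdio.h>

static uint64_t xor_scramble(uint64_t addr)
{
	return addr ^ 0x840;
}

/* Same shape as the check in the listing: reject the mapping if either
 * scrambled address is no longer real_page_size aligned. */
static int check_scrambled_alignment(uint64_t phys, uint64_t virt,
				     uint64_t real_page_size)
{
	if ((xor_scramble(phys) & (real_page_size - 1)) ||
	    (xor_scramble(virt) & (real_page_size - 1)))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_scrambled_alignment(0x2000, 0x5000, 0x1000)); /* -1 */
	return 0;
}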
384 struct hl_device *hdev = ctx->hdev;
385 struct asic_fixed_properties *prop = &hdev->asic_prop;
410 dev_err(hdev->dev,
426 dev_warn_ratelimited(hdev->dev,
444 struct hl_device *hdev = ctx->hdev;
445 struct asic_fixed_properties *prop = &hdev->asic_prop;
468 dev_warn_ratelimited(hdev->dev,
479 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
550 struct hl_device *hdev = ctx->hdev;
557 if (hdev->mmu_disable)
560 prop = &hdev->asic_prop;
570 mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
572 mutex_lock(&hdev->mmu_lock);
574 mutex_unlock(&hdev->mmu_lock);
586 int hl_mmu_if_set_funcs(struct hl_device *hdev)
588 struct asic_fixed_properties *prop = &hdev->asic_prop;
590 if (hdev->mmu_disable)
593 switch (hdev->asic_type) {
597 hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
602 hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
604 hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
607 dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
608 hdev->asic_type);
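hl_mmu_if_set_funcs() installs the page-table callbacks that match the ASIC generation: v1 hooks for older parts, v2 and v2-hr hooks for newer ones, with an error for anything unrecognized. A generic miniature of that dispatch (every name below is invented; only the shape mirrors the listing):

#include <stdio.h>

enum asic_gen { ASIC_V1, ASIC_V2 };

struct mmu_funcs { void (*init)(void); };

static void v1_init(void) { puts("v1 page tables"); }
static void v2_init(void) { puts("v2 page tables"); }

static int set_funcs(enum asic_gen gen, struct mmu_funcs *f)
{
	switch (gen) {
	case ASIC_V1: f->init = v1_init; return 0;
	case ASIC_V2: f->init = v2_init; return 0;
	default:      return -1; /* "Unrecognized ASIC type" path */
	}
}

int main(void)
{
	struct mmu_funcs f;

	if (!set_funcs(ASIC_V2, &f))
		f.init();
	return 0;
}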
617 * @hdev: pointer to device data.
622 u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
630 * @hdev: pointer to device data.
635 u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
640 int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
644 rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
646 dev_err_ratelimited(hdev->dev,
653 int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
658 rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
661 dev_err_ratelimited(hdev->dev,
672 struct hl_device *hdev = ctx->hdev;
674 if (!hl_device_operational(hdev, NULL))
677 mutex_lock(&hdev->mmu_lock);
679 hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid, pfw->va, pfw->size);
681 mutex_unlock(&hdev->mmu_lock);
712 queue_work(ctx->hdev->prefetch_wq, &handle_prefetch_work->prefetch_work);
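The prefetch fragments show the standard deferred-work pattern: a request is queued to hdev->prefetch_wq, and the handler takes mmu_lock in process context before touching the hardware. A kernel-style sketch under that assumption (struct prefetch_req and its fields are illustrative; the workqueue calls are the stock API):

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Illustrative request carrying the parameters seen in the handler above. */
struct prefetch_req {
	struct work_struct work;
	u32 flags, asid;
	u64 va, size;
};

static void prefetch_work_fn(struct work_struct *work)
{
	struct prefetch_req *req = container_of(work, struct prefetch_req, work);

	/* ... check the device is operational, take the MMU lock,
	 * program the prefetch, unlock ... */
	kfree(req);
}

static int queue_prefetch(struct workqueue_struct *wq, u64 va, u64 size)
{
	struct prefetch_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;

	req->va = va;
	req->size = size;
	INIT_WORK(&req->work, prefetch_work_fn);
	queue_work(wq, &req->work);
	return 0;
}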
738 dev_err_ratelimited(ctx->hdev->dev, "Invalid hop index %d\n", hop_idx);
745 return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
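The last fragment above is the whole hop-walk address computation: the VA bits selected by mask, shifted down by shift, index into the hop table in units of mmu_pte_size. A worked example with made-up geometry (9 index bits starting at bit 12, 8-byte PTEs):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pte_addr = hop_addr + pte_size * ((virt_addr & mask) >> shift) */
	uint64_t hop_addr  = 0x100000;
	uint64_t pte_size  = 8;
	uint64_t virt_addr = 0x3456789AULL;
	unsigned shift     = 12;
	uint64_t mask      = 0x1FFULL << shift;	/* bits 20:12 */

	uint64_t idx = (virt_addr & mask) >> shift;	/* 0x167 = 359 */
	printf("index %llu -> pte at 0x%llx\n",
	       (unsigned long long)idx,
	       (unsigned long long)(hop_addr + pte_size * idx));
	return 0;
}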
752 struct hl_device *hdev = data;
754 hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
766 * @hdev: habanalabs device structure.
775 static void hl_mmu_hr_pool_destroy(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv,
778 struct asic_fixed_properties *prop = &hdev->asic_prop;
797 gen_pool_for_each_chunk(*pool, mmu_dma_mem_free_from_chunk, hdev);
809 * @hdev: habanalabs device structure.
820 int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
823 struct asic_fixed_properties *prop = &hdev->asic_prop;
838 dev_err(hdev->dev, "Failed to create hr page pool\n");
844 dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
850 virt_addr = (uintptr_t) hl_asic_dma_alloc_coherent(hdev, pool_chunk_size,
854 dev_err(hdev->dev,
863 dev_err(hdev->dev, "Failed to fill host-resident page pool\n");
876 dev_err(hdev->dev, "Failed to allocate HOP from pgt pool\n");
887 hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
896 * @hdev: habanalabs device structure.
906 void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size)
910 hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
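hl_mmu_hr_init() and hl_mmu_hr_fini() manage a genalloc pool backed by DMA-coherent memory: chunks obtained with hl_asic_dma_alloc_coherent() are added to the pool, hop tables are carved out with gen_pool_alloc(), and teardown walks the chunks to return the DMA memory before destroying the pool. A sketch of that lifecycle with the stock genalloc API (the DMA free is left as a comment because the device handle and helper are driver-specific):

#include <linux/genalloc.h>
#include <linux/log2.h>

/* Pool whose minimum allocation is one hop table. */
static struct gen_pool *hr_pool_create(size_t hop_table_size)
{
	return gen_pool_create(ilog2(hop_table_size), -1);
}

/* Each DMA-coherent chunk becomes pool backing store. */
static int hr_pool_fill(struct gen_pool *pool, unsigned long chunk_va,
			size_t chunk_size)
{
	return gen_pool_add(pool, chunk_va, chunk_size, -1);
}

/* Returns 0 on exhaustion - the "Failed to allocate HOP" path above. */
static unsigned long hr_alloc_hop(struct gen_pool *pool, size_t hop_table_size)
{
	return gen_pool_alloc(pool, hop_table_size);
}

static void hr_chunk_free(struct gen_pool *pool, struct gen_pool_chunk *chunk,
			  void *data)
{
	size_t size = (chunk->end_addr - chunk->start_addr) + 1;

	/* hl_asic_dma_free_coherent(hdev, size, ...); - driver-specific */
	(void)size;
}

static void hr_pool_destroy(struct gen_pool *pool)
{
	gen_pool_for_each_chunk(pool, hr_chunk_free, NULL);
	gen_pool_destroy(pool);
}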
1064 struct hl_device *hdev = ctx->hdev;
1083 virt_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &phys_addr,
1090 hl_asic_dma_free_coherent(hdev, SZ_2M, virt_addr, phys_addr);
1097 dev_err(hdev->dev, "failed to allocate page\n");
1156 struct hl_device *hdev = ctx->hdev;
1161 rc = hr_func->get_tlb_mapping_params(hdev, &mmu_prop, hops, virt_addr, &is_huge);
1171 hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
1205 hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
1206 (hdev, hops->hop_info[i].hop_pte_val);
1236 struct hl_device *hdev = ctx->hdev;
1238 gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
1239 hdev->asic_prop.dmmu.hop_table_size);
1247 return ctx->hdev->asic_prop.mmu_pgt_addr +
1248 (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
1253 return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
1254 (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
1259 u64 page_mask = ctx->hdev->asic_prop.dmmu.hop_table_size - 1;
1276 ctx->hdev->asic_funcs->write_pte(ctx->hdev, hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr),
1284 ctx->hdev->asic_funcs->write_pte(ctx->hdev,
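The write_pte() fragments reveal the device-resident scheme: the walk runs over a host-memory shadow of each hop table, and every shadow update is mirrored to the device table at the address hl_mmu_dr_get_phys_addr() derives from the shadow address. Per the hop0 fragments just above, each context's hop0 sits at mmu_pgt_addr + asid * hop_table_size. A toy model of the shadow/mirror split (all names invented):

#include <stdint.h>
#include <stdio.h>

struct pgt {
	uint64_t *shadow;	/* host copy the driver walks */
	uint64_t dev_base;	/* device address of the real table */
};

static void write_pte(struct pgt *p, unsigned int idx, uint64_t val)
{
	p->shadow[idx] = val;	/* host shadow first */
	/* then the mirrored hardware write, here just printed */
	printf("dev write @0x%llx = 0x%llx\n",
	       (unsigned long long)(p->dev_base + idx * 8ULL),
	       (unsigned long long)val);
}

int main(void)
{
	uint64_t shadow[512] = { 0 };
	struct pgt hop0 = { shadow, 0x8000000ULL };

	write_pte(&hop0, 3, 0xABC000ULL | 0x3);	/* PTE: page addr + flags */
	return 0;
}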
1319 struct hl_device *hdev = ctx->hdev;
1320 struct asic_fixed_properties *prop = &hdev->asic_prop;
1328 phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
1331 dev_err(hdev->dev, "failed to allocate page\n");
1349 gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool,
1373 ctx->hdev->asic_funcs->read_pte(ctx->hdev, hl_mmu_dr_get_phys_hop0_addr(ctx));
1376 int hl_mmu_dr_init(struct hl_device *hdev)
1378 struct asic_fixed_properties *prop = &hdev->asic_prop;
1381 hdev->mmu_priv.dr.mmu_pgt_pool =
1384 if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
1385 dev_err(hdev->dev, "Failed to create page gen pool\n");
1389 rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
1394 dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
1398 hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid,
1400 if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
1410 gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
1415 void hl_mmu_dr_fini(struct hl_device *hdev)
1419 if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0))
1422 kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
1423 gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
1429 hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
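hl_mmu_dr_init() mirrors the HR variant but over fixed on-device memory: a genalloc pool spans the page-table region past the per-ASID hop0 tables, and the hop0 shadows are a single kvcalloc'ed array indexed by ASID. A sketch of that pairing with the stock APIs (the exact offsets are assumptions inferred from the fragments above):

#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>

static int dr_init(struct gen_pool **pool, void **shadow_hop0,
		   unsigned long pgt_addr, size_t pgt_size,
		   size_t hop_table_size, unsigned int max_asid)
{
	size_t hop0_area = (size_t)max_asid * hop_table_size;
	int rc;

	*pool = gen_pool_create(ilog2(hop_table_size), -1);
	if (!*pool)
		return -ENOMEM;

	/* On-device PGT memory past the per-ASID hop0 area feeds the pool. */
	rc = gen_pool_add(*pool, pgt_addr + hop0_area, pgt_size - hop0_area, -1);
	if (rc)
		goto err_pool;

	/* Host shadow of all hop0 tables, indexed by ASID. */
	*shadow_hop0 = kvcalloc(max_asid, hop_table_size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(*shadow_hop0)) {
		rc = -ENOMEM;
		goto err_pool;
	}

	return 0;

err_pool:
	gen_pool_destroy(*pool);
	return rc;
}

Teardown, as in the hl_mmu_dr_fini() fragments, is the reverse: kvfree() the shadow, gen_pool_destroy() the pool, and NULL the shadow pointer so a repeated fini is harmless.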