// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Marvell. */

#include <linux/soc/marvell/octeontx2/asm.h>
#include "otx2_cptpf.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"

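/*
 * CN10K parts push CPT instructions through an LMTST sequence: the
 * command is written to a write-combining LMTLINE mapping and then
 * committed with cn10k_lmt_flush(). Pre-CN10K (OcteonTX2) parts keep
 * using otx2_cpt_send_cmd(); the hw_ops tables below select between
 * the two paths.
 */
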
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			       struct otx2_cptlf_info *lf);

static struct cpt_hw_ops otx2_hw_ops = {
	.send_cmd = otx2_cpt_send_cmd,
	.cpt_get_compcode = otx2_cpt_get_compcode,
	.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
	.cpt_sg_info_create = otx2_sg_info_create,
};

static struct cpt_hw_ops cn10k_hw_ops = {
	.send_cmd = cn10k_cpt_send_cmd,
	.cpt_get_compcode = cn10k_cpt_get_compcode,
	.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
	.cpt_sg_info_create = otx2_sg_info_create,
};

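/*
 * Copy the instruction(s) into the LF's LMTLINE and trigger an LMTST
 * to hand them to CPT. The LMT line id is derived from the LF slot;
 * the target address encodes the CPT I/O register along with the
 * LMTST size.
 */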
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			       struct otx2_cptlf_info *lf)
{
	void __iomem *lmtline = lf->lmtline;
	u64 val = (lf->slot & 0x7FF);
	u64 tar_addr = 0;

	/* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
	tar_addr |= (__force u64)lf->ioreg |
		    (((OTX2_CPT_INST_SIZE / 16) - 1) & 0x7) << 4;
	/*
	 * Make sure memory areas pointed to in CPT_INST_S
	 * are flushed before the instruction is sent to CPT
	 */
	dma_wmb();

	/* Copy CPT command to LMTLINE */
	memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
	cn10k_lmt_flush(val, tar_addr);
}

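/*
 * Map the PF LMTLINE region on CN10K. The LMT base address is set up
 * by the AF and read back from RVU_PF_LMTLINE_ADDR; the mapping size
 * is the mailbox BAR length minus the PF and per-VF mailbox areas,
 * and it is mapped write-combining as LMTST requires.
 */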
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t size;
	u64 lmt_base;

	if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
		cptpf->lfs.ops = &otx2_hw_ops;
		return 0;
	}

	cptpf->lfs.ops = &cn10k_hw_ops;
	lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
	if (!lmt_base) {
		dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
		return -ENOMEM;
	}
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
	cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
	if (!cptpf->lfs.lmt_base) {
		dev_err(&pdev->dev,
			"Mapping of PF LMTLINE address failed\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);

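/*
 * Map the VF LMTLINE region on CN10K. For VFs the whole mailbox BAR
 * serves as the LMTLINE region, so it is mapped write-combining in
 * its entirety.
 */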
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;

	if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
		return 0;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	/* Map VF LMTLINE region */
	cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
	if (!cptvf->lfs.lmt_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);

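/*
 * CN10KA A-step erratum workaround: those parts need a hardware
 * context (CPTR) supplied with each request. The helpers below
 * allocate a dummy context for that purpose; BIT(60) is ORed into the
 * stored DMA handle as a marker and masked off again before the
 * context is flushed and unmapped on teardown.
 */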
void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
			    struct cn10k_cpt_errata_ctx *er_ctx)
{
	u64 cptr_dma;

	if (!is_dev_cn10ka_ax(pdev))
		return;

	cptr_dma = er_ctx->cptr_dma & ~(BIT_ULL(60));
	cn10k_cpt_ctx_flush(pdev, cptr_dma, true);
	dma_unmap_single(&pdev->dev, cptr_dma, CN10K_CPT_HW_CTX_SIZE,
			 DMA_BIDIRECTIONAL);
	kfree(er_ctx->hw_ctx);
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT);

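/* Populate word0 of a CPT hardware context with its default values. */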
void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
{
	hctx->w0.aop_valid = 1;
	hctx->w0.ctx_hdr_sz = 0;
	hctx->w0.ctx_sz = ctx_sz;
	hctx->w0.ctx_push_sz = 1;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT);

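/*
 * Allocate and DMA-map a dummy hardware context for the CN10KA A-step
 * erratum. On unaffected parts this is a no-op and er_ctx stays empty.
 */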
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
			  struct cn10k_cpt_errata_ctx *er_ctx)
{
	union cn10k_cpt_hw_ctx *hctx;
	u64 cptr_dma;

	er_ctx->cptr_dma = 0;
	er_ctx->hw_ctx = NULL;

	if (!is_dev_cn10ka_ax(pdev))
		return 0;

	hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL);
	if (unlikely(!hctx))
		return -ENOMEM;
	cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, cptr_dma)) {
		kfree(hctx);
		return -ENOMEM;
	}

	cn10k_cpt_hw_ctx_set(hctx, 1);
	er_ctx->hw_ctx = hctx;
	er_ctx->cptr_dma = cptr_dma | BIT_ULL(60);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT);

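/*
 * Flush (and optionally invalidate) a hardware context from the CPT
 * context cache via the LF's CTX_FLUSH register. The CTX_ERR read
 * back ensures the flush has been issued before returning.
 */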
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	u64 reg;

	reg = (uintptr_t)cptr >> 7;
	if (inval)
		reg = reg | BIT_ULL(46);

	otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
			 OTX2_CPT_LF_CTX_FLUSH, reg);
	/* Make sure that the FLUSH operation is complete */
	wmb();
	otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
			OTX2_CPT_LF_CTX_ERR);
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT);

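/* Pick the LMTST or legacy instruction-send ops based on VF capabilities. */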
void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
{
	if (test_bit(CN10K_LMTST, &cptvf->cap_flag))
		cptvf->lfs.ops = &cn10k_hw_ops;
	else
		cptvf->lfs.ops = &otx2_hw_ops;
}
EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT);