/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __OTX_CPTVF_REQUEST_MANAGER_H
#define __OTX_CPTVF_REQUEST_MANAGER_H

#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/pci.h>
#include "otx_cpt_hw_types.h"

/*
 * The maximum total number of SG buffers is 100; it is split equally
 * between input and output.
 */
#define OTX_CPT_MAX_SG_IN_CNT		50
#define OTX_CPT_MAX_SG_OUT_CNT		50

/* DMA mode direct or SG */
#define OTX_CPT_DMA_DIRECT_DIRECT	0
#define OTX_CPT_DMA_GATHER_SCATTER	1

/* Context source CPTR or DPTR */
#define OTX_CPT_FROM_CPTR		0
#define OTX_CPT_FROM_DPTR		1

/* CPT instruction queue alignment */
#define OTX_CPT_INST_Q_ALIGNMENT	128
#define OTX_CPT_MAX_REQ_SIZE		65535

/* Default command timeout in seconds */
#define OTX_CPT_COMMAND_TIMEOUT		4
#define OTX_CPT_TIMER_HOLD		0x03F
#define OTX_CPT_COUNT_HOLD		32
#define OTX_CPT_TIME_IN_RESET_COUNT	5

/* Minimum and maximum values for interrupt coalescing */
#define OTX_CPT_COALESC_MIN_TIME_WAIT	0x0
#define OTX_CPT_COALESC_MAX_TIME_WAIT	((1<<16)-1)
#define OTX_CPT_COALESC_MIN_NUM_WAIT	0x0
#define OTX_CPT_COALESC_MAX_NUM_WAIT	((1<<20)-1)

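/* Microcode opcode, split into major and minor opcode fields */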
union otx_cpt_opcode_info {
	u16 flags;
	struct {
		u8 major;
		u8 minor;
	} s;
};

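/*
 * Per-request parameters passed to the microcode: opcode, the two
 * opcode-specific parameters and the input data length.
 */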
struct otx_cptvf_request {
	u32 param1;
	u32 param2;
	u16 dlen;
	union otx_cpt_opcode_info opcode;
};

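/* Input/output buffer descriptor: virtual address, DMA address and size */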
struct otx_cpt_buf_ptr {
	u8 *vptr;
	dma_addr_t dma_addr;
	u16 size;
};

union otx_cpt_ctrl_info {
	u32 flags;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved0:26;
		u32 grp:3;	/* Group bits */
		u32 dma_mode:2;	/* DMA mode */
		u32 se_req:1;	/* To SE core */
#else
		u32 se_req:1;	/* To SE core */
		u32 dma_mode:2;	/* DMA mode */
		u32 grp:3;	/* Group bits */
		u32 reserved0:26;
#endif
	} s;
};

/*
 * CPT_INST_S software command definitions
 * Words EI (0-3)
 */
union otx_cpt_iq_cmd_word0 {
	u64 u64;
	struct {
		__be16 opcode;
		__be16 param1;
		__be16 param2;
		__be16 dlen;
	} s;
};

union otx_cpt_iq_cmd_word3 {
	u64 u64;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 grp:3;
		u64 cptr:61;
#else
		u64 cptr:61;
		u64 grp:3;
#endif
	} s;
};

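/*
 * Complete software command: word0 followed by the DPTR, RPTR and
 * CPTR/group words (words EI0-EI3 of CPT_INST_S).
 */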
struct otx_cpt_iq_cmd {
	union otx_cpt_iq_cmd_word0 cmd;
	u64 dptr;
	u64 rptr;
	union otx_cpt_iq_cmd_word3 cptr;
};

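/*
 * Gather/scatter list component: each component describes up to four
 * buffers (pointer and length), stored big-endian for the hardware.
 */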
struct otx_cpt_sglist_component {
	union {
		u64 len;
		struct {
			__be16 len0;
			__be16 len1;
			__be16 len2;
			__be16 len3;
		} s;
	} u;
	__be64 ptr0;
	__be64 ptr1;
	__be64 ptr2;
	__be64 ptr3;
};

struct otx_cpt_pending_entry {
	u64 *completion_addr;	/* Completion address */
	struct otx_cpt_info_buffer *info;
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	u8 resume_sender;	/* Notify sender to resume sending requests */
	u8 busy;		/* Entry status (free/busy) */
};

struct otx_cpt_pending_queue {
	struct otx_cpt_pending_entry *head;	/* Head of the queue */
	u32 front;			/* Process work from here */
	u32 rear;			/* Append new work here */
	u32 pending_count;		/* Pending requests count */
	u32 qlen;			/* Queue length */
	spinlock_t lock;		/* Queue lock */
};

struct otx_cpt_req_info {
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	struct otx_cptvf_request req;/* Request information (core specific) */
	union otx_cpt_ctrl_info ctrl;/* User control information */
	struct otx_cpt_buf_ptr in[OTX_CPT_MAX_SG_IN_CNT];
	struct otx_cpt_buf_ptr out[OTX_CPT_MAX_SG_OUT_CNT];
	u8 *iv_out;     /* IV to send back */
	u16 rlen;	/* Output length */
	u8 incnt;	/* Number of input buffers */
	u8 outcnt;	/* Number of output buffers */
	u8 req_type;	/* Type of request */
	u8 is_enc;	/* Whether this is an encryption request */
	u8 is_trunc_hmac;/* Whether a truncated HMAC is used */
};

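/*
 * Per-request bookkeeping kept by the request manager: pending queue
 * entry, DMA mappings for DPTR/RPTR/completion and the submission time
 * used for timeout handling.
 */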
struct otx_cpt_info_buffer {
	struct otx_cpt_pending_entry *pentry;
	struct otx_cpt_req_info *req;
	struct pci_dev *pdev;
	u64 *completion_addr;
	u8 *out_buffer;
	u8 *in_buffer;
	dma_addr_t dptr_baddr;
	dma_addr_t rptr_baddr;
	dma_addr_t comp_baddr;
	unsigned long time_in;
	u32 dlen;
	u32 dma_len;
	u8 extra_time;
};

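/*
 * Undo all DMA mappings taken for a request (the gather/scatter header
 * and the individual input/output buffers) and free the info buffer.
 */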
static inline void do_request_cleanup(struct pci_dev *pdev,
				      struct otx_cpt_info_buffer *info)
{
	struct otx_cpt_req_info *req;
	int i;

	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dma_len, DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		for (i = 0; i < req->outcnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}

		for (i = 0; i < req->incnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}
	kfree_sensitive(info);
}

struct otx_cptvf_wqe;
void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req);
void otx_cpt_post_process(struct otx_cptvf_wqe *wqe);
int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
		       int cpu_num);
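
/*
 * Illustrative sketch only (not part of the driver): how a submitter
 * might fill struct otx_cpt_req_info before passing it to
 * otx_cpt_do_request(). The opcode, buffer and cpu_num values below are
 * hypothetical.
 *
 *	struct otx_cpt_req_info *req_info = ...;
 *
 *	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
 *	req_info->ctrl.s.se_req = 1;
 *	req_info->req.opcode.s.major = MAJOR_OPCODE;	(hypothetical)
 *	req_info->req.dlen = dlen;
 *	req_info->in[req_info->incnt].vptr = in_buf;
 *	req_info->in[req_info->incnt].size = in_len;
 *	req_info->incnt++;
 *
 *	ret = otx_cpt_do_request(pdev, req_info, cpu_num);
 */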

#endif /* __OTX_CPTVF_REQUEST_MANAGER_H */