/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_USER_H
#define IRDMA_USER_H

#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *

#define	IRDMA_MAX_MR_SIZE       0x200000000000ULL

#define IRDMA_ACCESS_FLAGS_LOCALREAD		0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE		0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY	0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD		0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY	0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE		0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW		0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED		0x20
#define IRDMA_ACCESS_FLAGS_ALL			0x3f
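/*
 * The combined remote-access values already include the matching local
 * permission bit: REMOTEREAD = REMOTEREAD_ONLY | LOCALREAD and
 * REMOTEWRITE = REMOTEWRITE_ONLY | LOCALWRITE.
 */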

#define IRDMA_OP_TYPE_RDMA_WRITE		0x00
#define IRDMA_OP_TYPE_RDMA_READ			0x01
#define IRDMA_OP_TYPE_SEND			0x03
#define IRDMA_OP_TYPE_SEND_INV			0x04
#define IRDMA_OP_TYPE_SEND_SOL			0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV		0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL		0x0d
#define IRDMA_OP_TYPE_BIND_MW			0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR		0x09
#define IRDMA_OP_TYPE_INV_STAG			0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG	0x0b
#define IRDMA_OP_TYPE_NOP			0x0c
#define IRDMA_OP_TYPE_REC	0x3e
#define IRDMA_OP_TYPE_REC_IMM	0x3f
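/*
 * IRDMA_OP_TYPE_REC and IRDMA_OP_TYPE_REC_IMM are receive-side opcodes
 * reported through irdma_cq_poll_info.op_type; they are not posted as SQ
 * WQE opcodes.
 */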

#define IRDMA_FLUSH_MAJOR_ERR	1

enum irdma_device_caps_const {
	IRDMA_WQE_SIZE =			4,
	IRDMA_CQP_WQE_SIZE =			8,
	IRDMA_CQE_SIZE =			4,
	IRDMA_EXTENDED_CQE_SIZE =		8,
	IRDMA_AEQE_SIZE =			2,
	IRDMA_CEQE_SIZE =			1,
	IRDMA_CQP_CTX_SIZE =			8,
	IRDMA_SHADOW_AREA_SIZE =		8,
	IRDMA_QUERY_FPM_BUF_SIZE =		176,
	IRDMA_COMMIT_FPM_BUF_SIZE =		176,
	IRDMA_GATHER_STATS_BUF_SIZE =		1024,
	IRDMA_MIN_IW_QP_ID =			0,
	IRDMA_MAX_IW_QP_ID =			262143,
	IRDMA_MIN_CEQID =			0,
	IRDMA_MAX_CEQID =			1023,
	IRDMA_CEQ_MAX_COUNT =			IRDMA_MAX_CEQID + 1,
	IRDMA_MIN_CQID =			0,
	IRDMA_MAX_CQID =			524287,
	IRDMA_MIN_AEQ_ENTRIES =			1,
	IRDMA_MAX_AEQ_ENTRIES =			524287,
	IRDMA_MIN_CEQ_ENTRIES =			1,
	IRDMA_MAX_CEQ_ENTRIES =			262143,
	IRDMA_MIN_CQ_SIZE =			1,
	IRDMA_MAX_CQ_SIZE =			1048575,
	IRDMA_DB_ID_ZERO =			0,
	IRDMA_MAX_WQ_FRAGMENT_COUNT =		13,
	IRDMA_MAX_SGE_RD =			13,
	IRDMA_MAX_OUTBOUND_MSG_SIZE =		2147483647,
	IRDMA_MAX_INBOUND_MSG_SIZE =		2147483647,
	IRDMA_MAX_PUSH_PAGE_COUNT =		1024,
	IRDMA_MAX_PE_ENA_VF_COUNT =		32,
	IRDMA_MAX_VF_FPM_ID =			47,
	IRDMA_MAX_SQ_PAYLOAD_SIZE =		2145386496,
	IRDMA_MAX_INLINE_DATA_SIZE =		101,
	IRDMA_MAX_WQ_ENTRIES =			32768,
	IRDMA_Q2_BUF_SIZE =			256,
	IRDMA_QP_CTX_SIZE =			256,
	IRDMA_MAX_PDS =				262144,
	IRDMA_MIN_WQ_SIZE_GEN2 =		8,
};

enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED   = 1,
};

enum irdma_flush_opcode {
	FLUSH_INVALID = 0,
	FLUSH_GENERAL_ERR,
	FLUSH_PROT_ERR,
	FLUSH_REM_ACCESS_ERR,
	FLUSH_LOC_QP_OP_ERR,
	FLUSH_REM_OP_ERR,
	FLUSH_LOC_LEN_ERR,
	FLUSH_FATAL_ERR,
	FLUSH_RETRY_EXC_ERR,
	FLUSH_MW_BIND_ERR,
	FLUSH_REM_INV_REQ_ERR,
};

enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};

enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT     = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};

enum irdma_qp_caps {
	IRDMA_WRITE_WITH_IMM = 1,
	IRDMA_SEND_WITH_IMM  = 2,
	IRDMA_ROCE	     = 4,
	IRDMA_PUSH_MODE      = 8,
};

struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;

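/*
 * Circular-ring bookkeeping shared by the SQ, RQ and CQ: head and tail are
 * indices into a queue of @size entries.
 */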
struct irdma_ring {
	u32 head;
	u32 tail;
	u32 size;
};

struct irdma_cqe {
	__le64 buf[IRDMA_CQE_SIZE];
};

struct irdma_extended_cqe {
	__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};

struct irdma_post_send {
	struct ib_sge *sg_list;
	u32 num_sges;
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};

struct irdma_post_rq_info {
	u64 wr_id;
	struct ib_sge *sg_list;
	u32 num_sges;
};

struct irdma_rdma_write {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};

struct irdma_rdma_read {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};

struct irdma_bind_window {
	irdma_stag mr_stag;
	u64 bind_len;
	void *va;
	enum irdma_addressing_type addressing_type;
	bool ena_reads:1;
	bool ena_writes:1;
	irdma_stag mw_stag;
	bool mem_window_type_1:1;
};

struct irdma_inv_local_stag {
	irdma_stag target_stag;
};

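/*
 * Descriptor for a single SQ work request. op_type selects which member of
 * the op union is valid: the IRDMA_OP_TYPE_SEND* opcodes pair with op.send,
 * RDMA_WRITE* with op.rdma_write, RDMA_READ* with op.rdma_read, BIND_MW
 * with op.bind_window and INV_STAG with op.inv_local_stag.
 */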
struct irdma_post_sq_info {
	u64 wr_id;
	u8 op_type;
	u8 l4len;
	bool signaled:1;
	bool read_fence:1;
	bool local_fence:1;
	bool inline_data:1;
	bool imm_data_valid:1;
	bool report_rtt:1;
	bool udp_hdr:1;
	bool defer_flag:1;
	u32 imm_data;
	u32 stag_to_inv;
	union {
		struct irdma_post_send send;
		struct irdma_rdma_write rdma_write;
		struct irdma_rdma_read rdma_read;
		struct irdma_bind_window bind_window;
		struct irdma_inv_local_stag inv_local_stag;
	} op;
};

struct irdma_cq_poll_info {
	u64 wr_id;
	irdma_qp_handle qp_handle;
	u32 bytes_xfered;
	u32 tcp_seq_num_rtt;
	u32 qp_id;
	u32 ud_src_qpn;
	u32 imm_data;
	irdma_stag inv_stag; /* or L_R_Key */
	enum irdma_cmpl_status comp_status;
	u16 major_err;
	u16 minor_err;
	u16 ud_vlan;
	u8 ud_smac[6];
	u8 op_type;
	u8 q_type;
	bool stag_invalid_set:1; /* or L_R_Key set */
	bool error:1;
	bool solicited_event:1;
	bool ipv4:1;
	bool ud_vlan_valid:1;
	bool ud_smac_valid:1;
	bool imm_valid:1;
};

int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
		      bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq);
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq);
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq);
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq);
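
/*
 * Illustrative sketch only (not lifted from the driver): posting a signaled
 * RDMA write through the uk API. The QP is assumed to already be initialized
 * via irdma_uk_qp_init(); local_sge, remote_va, rkey and my_wr_id are
 * caller-supplied placeholders, and the remote address and STag are passed
 * via rem_addr.addr and rem_addr.lkey. Exact return-code conventions should
 * be verified against the implementation.
 *
 *	struct irdma_post_sq_info info = {};
 *	int ret;
 *
 *	info.wr_id = my_wr_id;
 *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 *	info.signaled = true;
 *	info.op.rdma_write.lo_sg_list = &local_sge;
 *	info.op.rdma_write.num_lo_sges = 1;
 *	info.op.rdma_write.rem_addr.addr = remote_va;
 *	info.op.rdma_write.rem_addr.lkey = rkey;
 *	ret = irdma_uk_rdma_write(qp, &info, true);
 */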

struct irdma_wqe_uk_ops {
	void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
				    u32 num_sges, u8 polarity);
	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
				u8 valid);
	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
				   struct irdma_bind_window *op_info);
};

int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info);
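
/*
 * Minimal polling sketch, assuming irdma_uk_cq_poll_cmpl() follows the usual
 * convention of returning 0 when a completion was reaped and a negative
 * errno (e.g. -ENOENT on an empty CQ) otherwise; verify the exact codes
 * against the implementation. handle_error() and complete_wr() stand in for
 * caller-specific logic.
 *
 *	struct irdma_cq_poll_info cqe_info;
 *
 *	while (!irdma_uk_cq_poll_cmpl(cq, &cqe_info)) {
 *		if (cqe_info.error)
 *			handle_error(cqe_info.major_err, cqe_info.minor_err);
 *		else
 *			complete_wr(cqe_info.wr_id, cqe_info.bytes_xfered);
 *	}
 */
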
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cnt);
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
		     struct irdma_qp_uk_init_info *info);
void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
			    u8 *rq_shift);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *rq_depth, u8 *rq_shift);
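
/*
 * Sketch of uk QP sizing and initialization based on the prototypes above;
 * this is illustrative only, not the driver's exact flow. The caller
 * allocates the SQ/RQ quanta arrays, shadow area and wrid tracking arrays
 * (sized from the returned depths) before calling irdma_uk_qp_init().
 *
 *	if (irdma_uk_calc_depth_shift_sq(&init_info, &init_info.sq_depth,
 *					 &init_info.sq_shift))
 *		goto err;
 *	if (irdma_uk_calc_depth_shift_rq(&init_info, &init_info.rq_depth,
 *					 &init_info.rq_shift))
 *		goto err;
 *	...allocate queues and fill the remaining init_info fields...
 *	if (irdma_uk_qp_init(qp, &init_info))
 *		goto err;
 */
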
struct irdma_sq_uk_wr_trk_info {
	u64 wrid;
	u32 wr_len;
	u16 quanta;
	u8 reserved[2];
};

struct irdma_qp_quanta {
	__le64 elem[IRDMA_WQE_SIZE];
};

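/*
 * Per-QP state used by the uk WQE posting and completion paths: the SQ/RQ
 * quanta arrays and ring bookkeeping, doorbell and shadow-area pointers,
 * per-queue WQE polarity, and the HW-generation-specific WQE ops.
 */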
struct irdma_qp_uk {
	struct irdma_qp_quanta *sq_base;
	struct irdma_qp_quanta *rq_base;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	__le64 *shadow_area;
	struct irdma_ring sq_ring;
	struct irdma_ring rq_ring;
	struct irdma_ring initial_ring;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	struct irdma_wqe_uk_ops wqe_ops;
	u16 conn_wqes;
	u8 qp_type;
	u8 swqe_polarity;
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	bool deferred_flag:1;
	bool first_sq_wq:1;
	bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
	bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
	bool destroy_pending:1; /* Indicates the QP is being destroyed */
	void *back_qp;
	u8 dbg_rq_flushed;
	u8 sq_flush_seen;
	u8 rq_flush_seen;
};

struct irdma_cq_uk {
	struct irdma_cqe *cq_base;
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	__le64 *shadow_area;
	u32 cq_id;
	u32 cq_size;
	struct irdma_ring cq_ring;
	u8 polarity;
	bool avoid_mem_cflct:1;
};

struct irdma_qp_uk_init_info {
	struct irdma_qp_quanta *sq;
	struct irdma_qp_quanta *rq;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	__le64 *shadow_area;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u32 sq_depth;
	u32 rq_depth;
	u8 first_sq_wq;
	u8 type;
	u8 sq_shift;
	u8 rq_shift;
	int abi_ver;
	bool legacy_mode;
};

struct irdma_cq_uk_init_info {
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;
};

__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *wqdepth);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */