/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/qlnx/qlnxe/rdma_common.h 337519 2018-08-09 01:39:47Z davidcs $
 *
 */

#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************************************************************/
/* Common RDMA definitions shared by the eCore and protocol RDMA driver */
/************************************************************************/

#define RDMA_RESERVED_LKEY              (0)             //Reserved lkey
#define RDMA_RING_PAGE_SIZE             (0x1000)        //4KB pages

#define RDMA_MAX_SGE_PER_SQ_WQE         (4)             //max number of SGEs in a single SQ WQE
#define RDMA_MAX_SGE_PER_RQ_WQE         (4)             //max number of SGEs in a single RQ WQE

#define RDMA_MAX_DATA_SIZE_IN_WQE       (0x80000000)    //max size of data in a single request

#define RDMA_REQ_RD_ATOMIC_ELM_SIZE     (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE    (0x20)

#define RDMA_MAX_CQS                    (64*1024)
#define RDMA_MAX_TIDS                   (128*1024-1)
#define RDMA_MAX_PDS                    (64*1024)
#define RDMA_MAX_XRC_SRQS               (1024)
#define RDMA_MAX_SRQS                   (32*1024)

#define RDMA_NUM_STATISTIC_COUNTERS     MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2  MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB  MAX_NUM_VPORTS_BB

#define RDMA_TASK_TYPE                  (PROTOCOLID_ROCE)


struct rdma_srq_id
{
	__le16 srq_idx /* SRQ index */;
	__le16 opaque_fid;
};


struct rdma_srq_producers
{
	__le32 sge_prod /* Currently produced SGE in the SRQ */;
	__le32 wqe_prod /* Currently produced WQE in the SRQ */;
};
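
/*
 * Illustrative sketch (not part of the original interface): publishing new
 * SRQ producer values after receive buffers have been posted. The
 * rdma_srq_producers structure is assumed to sit in DMA-visible memory that
 * the device reads; the required memory barriers and little-endian
 * conversion are driver-specific and elided here. All "example_" names in
 * this file are hypothetical helpers, not part of the firmware interface.
 */
static inline void
example_srq_update_producers(volatile struct rdma_srq_producers *prod,
			     u32 wqe_prod, u32 sge_prod)
{
	prod->sge_prod = sge_prod;
	prod->wqe_prod = wqe_prod;
}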

/*
 * rdma completion notification queue element
 */
struct rdma_cnqe
{
	struct regpair cq_handle;
};


struct rdma_cqe_responder
{
	struct regpair srq_wr_id;
	struct regpair qp_handle;
	__le32 imm_data_or_inv_r_Key /* immediate data in case imm_flg is set, or invalidated r_key in case inv_flg is set */;
	__le32 length;
	__le32 imm_data_hi /* High bytes of immediate data in case imm_flg is set, iWARP only */;
	__le16 rq_cons /* Valid only when status is WORK_REQUEST_FLUSHED_ERR. Indicates an aggregative flush on all posted RQ WQEs up to the reported rq_cons. */;
	u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_RESPONDER_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_RESPONDER_TYPE_SHIFT       1
#define RDMA_CQE_RESPONDER_INV_FLG_MASK     0x1 /* r_key invalidated indicator */
#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT    3
#define RDMA_CQE_RESPONDER_IMM_FLG_MASK     0x1 /* immediate data indicator */
#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT    4
#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK    0x1 /* 1=this CQE relates to an RDMA Write. 0=Send. */
#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT   5
#define RDMA_CQE_RESPONDER_RESERVED2_MASK   0x3
#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT  6
	u8 status;
};
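
/*
 * Illustrative sketch: extracting sub-fields from the flags byte using the
 * MASK/SHIFT pairs above. EXAMPLE_GET_FIELD is a local example macro in the
 * common (value >> SHIFT) & MASK style, not necessarily the accessor the
 * driver itself uses.
 */
#define EXAMPLE_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

static inline u8
example_cqe_resp_type(const struct rdma_cqe_responder *resp)
{
	/* Returns one of enum rdma_cqe_type, defined below. */
	return EXAMPLE_GET_FIELD(resp->flags, RDMA_CQE_RESPONDER_TYPE);
}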

struct rdma_cqe_requester
{
	__le16 sq_cons;
	__le16 reserved0;
	__le32 reserved1;
	struct regpair qp_handle;
	struct regpair reserved2;
	__le32 reserved3;
	__le16 reserved4;
	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_REQUESTER_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_REQUESTER_TYPE_SHIFT       1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK   0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT  3
	u8 status;
};

struct rdma_cqe_common
{
	struct regpair reserved0;
	struct regpair qp_handle;
	__le16 reserved1[7];
	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_COMMON_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_COMMON_TYPE_SHIFT       1
#define RDMA_CQE_COMMON_RESERVED2_MASK   0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT  3
	u8 status;
};

/*
 * rdma completion queue element
 */
union rdma_cqe
{
	struct rdma_cqe_responder resp;
	struct rdma_cqe_requester req;
	struct rdma_cqe_common cmn;
};
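
/*
 * Illustrative sketch: validating a CQE with the toggle bit. Software keeps
 * its own expected toggle value, which it flips each time it wraps around
 * the CQ ring; a CQE is valid only while FW's toggle matches that expected
 * value. This mirrors the usual toggle-bit convention for these rings but
 * is a hypothetical helper, not the driver's actual poll routine.
 */
static inline int
example_cqe_is_valid(const union rdma_cqe *cqe, u8 sw_toggle)
{
	u8 fw_toggle = EXAMPLE_GET_FIELD(cqe->cmn.flags,
	    RDMA_CQE_COMMON_TOGGLE_BIT);

	return fw_toggle == sw_toggle;
}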


/*
 * CQE requester status enumeration
 */
enum rdma_cqe_requester_status_enum
{
	RDMA_CQE_REQ_STS_OK,
	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};


/*
 * CQE responder status enumeration
 */
enum rdma_cqe_responder_status_enum
{
	RDMA_CQE_RESP_STS_OK,
	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};


/*
 * CQE type enumeration
 */
enum rdma_cqe_type
{
	RDMA_CQE_TYPE_REQUESTER,
	RDMA_CQE_TYPE_RESPONDER_RQ,
	RDMA_CQE_TYPE_RESPONDER_SRQ,
	RDMA_CQE_TYPE_INVALID,
	MAX_RDMA_CQE_TYPE
};


/*
 * DIF block size options
 */
enum rdma_dif_block_size
{
	RDMA_DIF_BLOCK_512=0,
	RDMA_DIF_BLOCK_4096=1,
	MAX_RDMA_DIF_BLOCK_SIZE
};


/*
 * DIF CRC initial value
 */
enum rdma_dif_crc_seed
{
	RDMA_DIF_CRC_SEED_0000=0,
	RDMA_DIF_CRC_SEED_FFFF=1,
	MAX_RDMA_DIF_CRC_SEED
};


/*
 * RDMA DIF error result structure
 */
struct rdma_dif_error_result
{
	__le32 error_intervals /* Total number of error intervals in the IO. */;
	__le32 dif_error_1st_interval /* Number of the first interval that contained an error. Set to 0xFFFFFFFF if the error occurred in the runt block. */;
	u8 flags;
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK      0x1 /* CRC error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT     0
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK  0x1 /* App Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK  0x1 /* Ref Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK               0xF
#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT              3
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK              0x1 /* Indicates the structure is valid. Toggles each time an invalidate region is performed. */
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT             7
	u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word-line writing. */;
};
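
/*
 * Illustrative sketch: decoding a DIF error result. The structure is valid
 * only once its toggle bit has flipped relative to the previously observed
 * value; the error-type bits then say which check failed. Hypothetical
 * helper reusing the EXAMPLE_GET_FIELD macro defined earlier in this file.
 */
static inline int
example_dif_crc_error(const struct rdma_dif_error_result *res)
{
	return EXAMPLE_GET_FIELD(res->flags,
	    RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC);
}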


/*
 * DIF IO direction
 */
enum rdma_dif_io_direction_flg
{
	RDMA_DIF_DIR_RX=0,
	RDMA_DIF_DIR_TX=1,
	MAX_RDMA_DIF_IO_DIRECTION_FLG
};


/*
 * RDMA DIF runt result structure
 */
struct rdma_dif_runt_result
{
	__le16 guard_tag /* CRC result of the received IO. */;
	__le16 reserved[3];
};


/*
 * memory window type enumeration
 */
enum rdma_mw_type
{
	RDMA_MW_TYPE_1,
	RDMA_MW_TYPE_2A,
	MAX_RDMA_MW_TYPE
};


struct rdma_rq_sge
{
	struct regpair addr;
	__le32 length;
	__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF /* key of the memory this RQ WQE refers to */
#define RDMA_RQ_SGE_L_KEY_SHIFT     0
#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7 /* first SGE: number of SGEs in this RQ WQE. Other SGEs: should be set to 0 */
#define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
#define RDMA_RQ_SGE_RESERVED0_MASK  0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
};
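
/*
 * Illustrative sketch: packing an RQ SGE. The l_key occupies the low 26
 * bits of the flags dword, and the first SGE of a WQE carries the total SGE
 * count in NUM_SGES. Hypothetical helper; the common lo/hi layout of struct
 * regpair is assumed, and little-endian conversion is elided.
 */
static inline void
example_build_rq_sge(struct rdma_rq_sge *sge, u64 va, u32 len,
		     u32 lkey, u8 num_sges)
{
	sge->addr.hi = (u32)(va >> 32);
	sge->addr.lo = (u32)va;
	sge->length = len;
	sge->flags =
	    ((lkey & RDMA_RQ_SGE_L_KEY_MASK) << RDMA_RQ_SGE_L_KEY_SHIFT) |
	    (((u32)num_sges & RDMA_RQ_SGE_NUM_SGES_MASK) <<
	     RDMA_RQ_SGE_NUM_SGES_SHIFT);
}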


struct rdma_sq_atomic_wqe
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK         0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK           0x1 /* Don't care for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks, including the SGE */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};
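
/*
 * Illustrative sketch: filling the fixed part of an atomic WQE. The atomic
 * payload is always 8 bytes, and wqe_size counts 16B chunks; the value of 4
 * below assumes the three header chunks above plus one trailing local SGE,
 * per the wqe_size comment in the structure. Hypothetical helper; the
 * trailing SGE, flags, and endian conversion are elided.
 */
static inline void
example_prep_atomic(struct rdma_sq_atomic_wqe *wqe, u8 req_type,
		    u64 remote_va, u32 r_key, u64 cmp_data, u64 swap_or_add)
{
	wqe->req_type = req_type; /* an atomic value from enum rdma_sq_req_type, defined below */
	wqe->length = 8;
	wqe->remote_va.hi = (u32)(remote_va >> 32);
	wqe->remote_va.lo = (u32)remote_va;
	wqe->r_key = r_key;
	wqe->cmp_data.hi = (u32)(cmp_data >> 32);
	wqe->cmp_data.lo = (u32)cmp_data;
	wqe->swap_data.hi = (u32)(swap_or_add >> 32);
	wqe->swap_data.lo = (u32)swap_or_add;
	wqe->wqe_size = 4; /* 3 header chunks + 1 local SGE chunk (assumed) */
}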


/*
 * First element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_1st
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs. Set to number of SGEs + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};


/*
 * Second element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_2nd
{
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
};


/*
 * Third element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_3rd
{
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};


struct rdma_sq_bind_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK       0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK         0x1 /* Don't care for bind wqe */
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1 /* zero-based indication */
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK     0x1
#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT    3
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_BIND_WQE_RESERVED2_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};


/*
 * First element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for bind wqe */
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};


/*
 * Second element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_2nd
{
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero-based indication */
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK     0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT    3
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK      0x7
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};


/*
 * Structure with only the SQ WQE common fields. Size is one SQ element (16B).
 */
struct rdma_sq_common_wqe
{
	__le32 reserved1[3];
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK       0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK         0x1 /* If set, signals the responder to generate a solicited event on this WQE (relevant only for SENDs and RDMA write with Imm) */
#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK     0x1 /* If set, indicates that inline data follows this WQE instead of SGEs (relevant only for SENDs and RDMA writes) */
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT    4
#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK      0x7
#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to number of SGEs + 1. With inline data: set to the whole number of 16B chunks containing the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
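
/*
 * Illustrative sketch: computing wqe_size per the rule in the comment
 * above. With SGEs, the size is the number of SGEs plus the one 16B header
 * chunk; with inline data, the payload is rounded up to whole 16B chunks
 * and the header chunk is added. Hypothetical helper.
 */
static inline u8
example_sq_wqe_size(u8 num_sges, u32 inline_len, int is_inline)
{
	if (!is_inline)
		return (u8)(num_sges + 1);
	return (u8)(1 + (inline_len + 15) / 16);
}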


struct rdma_sq_fmr_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK                0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT               0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK            0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT           1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK           0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT          2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK                  0x1 /* Don't care for FMR wqe */
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT                 3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK              0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT             4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK         0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT        5
#define RDMA_SQ_FMR_WQE_RESERVED0_MASK               0x3
#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT              6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK           0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT          0
#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK              0x1 /* zero-based indication */
#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT             5
#define RDMA_SQ_FMR_WQE_BIND_EN_MASK                 0x1 /* indicates whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT                6
#define RDMA_SQ_FMR_WQE_RESERVED1_MASK               0x1
#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT              7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK             0x1
#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT            0
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK            0x1
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT           1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK           0x1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT          2
#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK              0x1
#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT             3
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK             0x1
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT            4
#define RDMA_SQ_FMR_WQE_RESERVED2_MASK               0x7
#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT              5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. In case of DIF the length includes the DIF guards. */;
	struct regpair pbl_addr /* Address of PBL */;
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, when runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the dif_runt_crc_value field is valid. In RX IO, indicates the calculated runt value is to be placed in the host buffer. */
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK    0x1 /* In RX IO, the Ref Tag remains at the constant value dif_base_ref_tag */
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT   7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              8
	__le32 Reserved5;
};
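
/*
 * Illustrative sketch: composing the dif_flags word of an FMR WQE for a TX
 * DIF-protected region with 512B blocks, full guard validation, and an
 * 0xFFFF CRC seed, using the enums defined earlier in this file.
 * Hypothetical helper; endian conversion is elided.
 */
static inline u16
example_fmr_dif_flags_tx_512(void)
{
	return (RDMA_DIF_DIR_TX << RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT) |
	    (RDMA_DIF_BLOCK_512 << RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT) |
	    (1 << RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT) |
	    (1 << RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT) |
	    (1 << RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT) |
	    (RDMA_DIF_CRC_SEED_FFFF << RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT);
}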


/*
 * First element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK           0x1 /* Don't care for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK       0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK        0x3
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};


/*
 * Second element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_2nd
{
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK  0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero-based indication */
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT    5
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK        0x1 /* indicates whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT       6
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK      0x1
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT     7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK    0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT   0
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK     0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT    3
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK      0x7
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. */;
	struct regpair pbl_addr /* Address of PBL */;
};


/*
 * Third element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_3rd
{
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, when runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the dif_runt_crc_value field is valid. In RX IO, indicates the calculated runt value is to be placed in the host buffer. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
	__le32 Reserved5;
};


struct rdma_sq_local_inv_wqe
{
	struct regpair reserved;
	__le32 inv_l_key /* The local key to invalidate */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK         0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK           0x1 /* Don't care for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};


struct rdma_sq_rdma_wqe
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length. If DIF on host is enabled, the length does NOT include the DIF guards. */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1 /* If set, indicates that inline data follows this WQE instead of SGEs. Applicable to RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK              0x1 /* If set, indicates a read-with-invalidate WQE (iWARP only) */
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT             6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                7
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to number of SGEs + 1. With inline data: set to the whole number of 16B chunks containing the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes the first RDMA on the related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes the last RDMA on the related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
	u8 reserved2[3];
};
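
/*
 * Illustrative sketch: filling the fixed part of an RDMA WQE that uses
 * SGEs, following the wqe_size rule above (number of SGEs + 1).
 * Hypothetical helper; flags other than COMP_FLG, the trailing SGEs, SQ
 * ring mechanics, and endian conversion are elided.
 */
static inline void
example_prep_rdma_write(struct rdma_sq_rdma_wqe *wqe, u8 req_type,
			u64 remote_va, u32 r_key, u32 length, u8 num_sges)
{
	wqe->req_type = req_type; /* e.g. RDMA_SQ_REQ_TYPE_RDMA_WR, defined below */
	wqe->length = length;
	wqe->remote_va.hi = (u32)(remote_va >> 32);
	wqe->remote_va.lo = (u32)remote_va;
	wqe->r_key = r_key;
	wqe->wqe_size = (u8)(num_sges + 1);
	wqe->flags = (u8)(1 << RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT);
}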


/*
 * First element (16 bytes) of rdma wqe
 */
struct rdma_sq_rdma_wqe_1st
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK           0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK       0x1 /* If set, indicates that inline data follows this WQE instead of SGEs. Applicable to RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK     0x1 /* If set, indicates a read-with-invalidate WQE (iWARP only) */
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT    6
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x1
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       7
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to number of SGEs + 1. With inline data: set to the whole number of 16B chunks containing the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};


/*
 * Second element (16 bytes) of rdma wqe
 */
struct rdma_sq_rdma_wqe_2nd
{
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK         0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT        0
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes the first DIF on the related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes the last DIF on the related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT  2
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK              0x1F
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT             3
	u8 reserved2[3];
};


/*
 * SQ WQE req type enumeration
 */
enum rdma_sq_req_type
{
	RDMA_SQ_REQ_TYPE_SEND,
	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
	RDMA_SQ_REQ_TYPE_RDMA_WR,
	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
	RDMA_SQ_REQ_TYPE_RDMA_RD,
	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
	RDMA_SQ_REQ_TYPE_FAST_MR,
	RDMA_SQ_REQ_TYPE_BIND,
	RDMA_SQ_REQ_TYPE_INVALID,
	MAX_RDMA_SQ_REQ_TYPE
};


struct rdma_sq_send_wqe
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK         0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_SEND_WQE_SE_FLG_MASK           0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK       0x1 /* If set, indicates that inline data follows this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for send wqe */
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_SEND_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to number of SGEs + 1. With inline data: set to the whole number of 16B chunks containing the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	__le32 reserved1[4];
};


struct rdma_sq_send_wqe_1st
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK         0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK     0x1 /* If set, indicates that inline data follows this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to number of SGEs + 1. With inline data: set to the whole number of 16B chunks containing the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};


struct rdma_sq_send_wqe_2st
{
	__le32 reserved1[4];
};


struct rdma_sq_sge
{
	__le32 length /* Total length of the send. If DIF on host is enabled, the SGE length includes the DIF guards. */;
	struct regpair addr;
	__le32 l_key;
};


struct rdma_srq_wqe_header
{
	struct regpair wr_id;
	u8 num_sges /* number of SGEs in the WQE */;
	u8 reserved2[7];
};

struct rdma_srq_sge
{
	struct regpair addr;
	__le32 length;
	__le32 l_key;
};

/*
 * rdma srq element
 */
union rdma_srq_elm
{
	struct rdma_srq_wqe_header header;
	struct rdma_srq_sge sge;
};
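
/*
 * Illustrative sketch: an SRQ WQE laid out as consecutive 16B ring
 * elements - one rdma_srq_wqe_header followed by num_sges rdma_srq_sge
 * entries. "elems" is assumed to point at the next free elements of the
 * SRQ ring; hypothetical helper, ring wrap handling and endian conversion
 * elided.
 */
static inline void
example_post_srq_recv(union rdma_srq_elm *elems, u64 wr_id,
		      u64 va, u32 len, u32 lkey)
{
	elems[0].header.wr_id.hi = (u32)(wr_id >> 32);
	elems[0].header.wr_id.lo = (u32)wr_id;
	elems[0].header.num_sges = 1;
	elems[1].sge.addr.hi = (u32)(va >> 32);
	elems[1].sge.addr.lo = (u32)va;
	elems[1].sge.length = len;
	elems[1].sge.l_key = lkey;
}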


/*
 * Rdma doorbell data for flags update
 */
struct rdma_pwm_flags_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* aggregative flags */;
	u8 reserved;
};


/*
 * Rdma doorbell data for SQ and RQ
 */
struct rdma_pwm_val16_data
{
	__le16 icid /* internal CID */;
	__le16 value /* aggregated value to update */;
};


union rdma_pwm_val16_data_union
{
	struct rdma_pwm_val16_data as_struct /* Parameters field */;
	__le32 as_dword;
};


/*
 * Rdma doorbell data for CQ
 */
struct rdma_pwm_val32_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* one bit for each DQ counter flag in the CM context that the DQ can increment */;
	u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK             0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT            0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK           0x1 /* enable QM bypass */
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT          2
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK  0x1 /* Connection type is iWARP */
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK         0x1 /* Flag indicating that the 16b variable should be updated. Should be used when conn_type_is_iwarp is set */
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT        4
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK            0x7
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT           5
	__le32 value /* aggregated value to update */;
};


union rdma_pwm_val32_data_union
{
	struct rdma_pwm_val32_data as_struct /* Parameters field */;
	struct regpair as_repair;
};
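
/*
 * Illustrative sketch: composing the CQ doorbell payload. The union view
 * suggests the intended use - fill the structured form, then write it out
 * as a single value (the regpair view) to the doorbell BAR. The agg_cmd
 * semantics come from enum db_agg_cmd_sel elsewhere in the HSI, so it is
 * taken here as a raw 2-bit parameter. Hypothetical helper; the actual
 * doorbell write and endian conversion are elided.
 */
static inline union rdma_pwm_val32_data_union
example_cq_doorbell(u16 icid, u8 agg_cmd, u32 cons_value)
{
	union rdma_pwm_val32_data_union db;

	db.as_struct.icid = icid;
	db.as_struct.agg_flags = 0;
	db.as_struct.params =
	    (u8)(((agg_cmd & RDMA_PWM_VAL32_DATA_AGG_CMD_MASK) <<
		  RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT) |
		 (1 << RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT));
	db.as_struct.value = cons_value;
	return db;
}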

#endif /* __RDMA_COMMON__ */