/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_roce_api.h 337517 2018-08-09 01:17:35Z davidcs $
 *
 */

31316485Sdavidcs#ifndef __ECORE_RDMA_API_H__
32316485Sdavidcs#define __ECORE_RDMA_API_H__
33316485Sdavidcs
34337517Sdavidcs#ifndef ETH_ALEN
35316485Sdavidcs#define ETH_ALEN 6
36320164Sdavidcs#endif
37316485Sdavidcs
38316485Sdavidcs
/* Destination of a Light-L2 (LL2) transmitted frame. */
enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX /* number of valid destinations */
};

/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size Limitation
 * The CNQ size should be set as twice the amount of CQs, since for each CQ one
 * element may be inserted into the CNQ and another element is used per CQ to
 * accommodate for a possible race in the arm mechanism.
 * The FW supports a CNQ of 64k-1 and this apparently causes an issue - notice
 * that the number of QPs can reach 32k giving 64k CQs and 128k CNQ elements.
 * Luckily the FW can buffer CNQ elements avoiding an overflow, on the expense
 * of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */

/* rdma interface */

/* Kind of object a TID (translation index) registers. */
enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR, /* regular memory region */
	ECORE_RDMA_TID_FMR,           /* fast memory region */
	ECORE_RDMA_TID_MW_TYPE1,      /* type-1 memory window */
	ECORE_RDMA_TID_MW_TYPE2A      /* type-2A memory window */
};

/* RoCE QP states, mirroring the standard RDMA QP state machine. */
enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

77316485Sdavidcstypedef
78316485Sdavidcsvoid (*affiliated_event_t)(void	*context,
79316485Sdavidcs			   u8	fw_event_code,
80316485Sdavidcs			   void	*fw_handle);
81316485Sdavidcs
82316485Sdavidcstypedef
83316485Sdavidcsvoid (*unaffiliated_event_t)(void *context,
84316485Sdavidcs			     u8   event_code);
85316485Sdavidcs
86316485Sdavidcsstruct ecore_rdma_events {
87316485Sdavidcs	void			*context;
88316485Sdavidcs	affiliated_event_t	affiliated_event;
89316485Sdavidcs	unaffiliated_event_t	unaffiliated_event;
90316485Sdavidcs};
91316485Sdavidcs
92316485Sdavidcsstruct ecore_rdma_device {
93316485Sdavidcs    /* Vendor specific information */
94316485Sdavidcs	u32	vendor_id;
95316485Sdavidcs	u32	vendor_part_id;
96316485Sdavidcs	u32	hw_ver;
97316485Sdavidcs	u64	fw_ver;
98316485Sdavidcs
99316485Sdavidcs	u64	node_guid; /* node GUID */
100316485Sdavidcs	u64	sys_image_guid; /* System image GUID */
101316485Sdavidcs
102316485Sdavidcs	u8	max_cnq;
103316485Sdavidcs	u8	max_sge; /* The maximum number of scatter/gather entries
104316485Sdavidcs			  * per Work Request supported
105316485Sdavidcs			  */
106316485Sdavidcs	u8	max_srq_sge; /* The maximum number of scatter/gather entries
107316485Sdavidcs			      * per Work Request supported for SRQ
108316485Sdavidcs			      */
109316485Sdavidcs	u16	max_inline;
110316485Sdavidcs	u32	max_wqe; /* The maximum number of outstanding work
111316485Sdavidcs			  * requests on any Work Queue supported
112316485Sdavidcs			  */
113316485Sdavidcs	u32	max_srq_wqe; /* The maximum number of outstanding work
114316485Sdavidcs			      * requests on any Work Queue supported for SRQ
115316485Sdavidcs			      */
116316485Sdavidcs	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
117316485Sdavidcs					     * & atomic operation that can be
118316485Sdavidcs					     * outstanding per QP
119316485Sdavidcs					     */
120316485Sdavidcs
121316485Sdavidcs	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
122316485Sdavidcs					    * initiation of RDMA Read
123316485Sdavidcs					    * & atomic operations
124316485Sdavidcs					    */
125316485Sdavidcs	u64	max_dev_resp_rd_atomic_resc;
126316485Sdavidcs	u32	max_cq;
127316485Sdavidcs	u32	max_qp;
128316485Sdavidcs	u32	max_srq; /* Maximum number of SRQs */
129316485Sdavidcs	u32	max_mr; /* Maximum number of MRs supported by this device */
130316485Sdavidcs	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
131316485Sdavidcs			      * block that can be registered by this device
132316485Sdavidcs			      */
133316485Sdavidcs	u32	max_cqe;
134316485Sdavidcs	u32	max_mw; /* The maximum number of memory windows supported */
135316485Sdavidcs	u32	max_fmr;
136316485Sdavidcs	u32	max_mr_mw_fmr_pbl;
137316485Sdavidcs	u64	max_mr_mw_fmr_size;
138316485Sdavidcs	u32	max_pd; /* The maximum number of protection domains supported */
139316485Sdavidcs	u32	max_ah;
140316485Sdavidcs	u8	max_pkey;
141316485Sdavidcs	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
142316485Sdavidcs	u8	max_stats_queues; /* Maximum number of statistics queues */
143316485Sdavidcs	u32	dev_caps;
144316485Sdavidcs
145316485Sdavidcs	/* Abilty to support RNR-NAK generation */
146316485Sdavidcs
147316485Sdavidcs#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
148316485Sdavidcs#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
149316485Sdavidcs	/* Abilty to support shutdown port */
150316485Sdavidcs#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
151316485Sdavidcs#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
152316485Sdavidcs	/* Abilty to support port active event */
153316485Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
154316485Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
155316485Sdavidcs	/* Abilty to support port change event */
156316485Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
157316485Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
158316485Sdavidcs	/* Abilty to support system image GUID */
159316485Sdavidcs#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
160316485Sdavidcs#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
161316485Sdavidcs	/* Abilty to support bad P_Key counter support */
162316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
163316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
164316485Sdavidcs	/* Abilty to support atomic operations */
165316485Sdavidcs#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
166316485Sdavidcs#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
167316485Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
168316485Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
169316485Sdavidcs	/* Abilty to support modifying the maximum number of
170316485Sdavidcs	 * outstanding work requests per QP
171316485Sdavidcs	 */
172316485Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
173316485Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
174316485Sdavidcs	/* Abilty to support automatic path migration */
175316485Sdavidcs#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
176316485Sdavidcs#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
177316485Sdavidcs	/* Abilty to support the base memory management extensions */
178316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
179316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
180316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
181316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
182316485Sdavidcs	/* Abilty to support multipile page sizes per memory region */
183316485Sdavidcs#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
184316485Sdavidcs#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
185316485Sdavidcs	/* Abilty to support block list physical buffer list */
186316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
187316485Sdavidcs#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
188316485Sdavidcs	/* Abilty to support zero based virtual addresses */
189316485Sdavidcs#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
190316485Sdavidcs#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
191316485Sdavidcs	/* Abilty to support local invalidate fencing */
192316485Sdavidcs#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
193316485Sdavidcs#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
194316485Sdavidcs	/* Abilty to support Loopback on QP */
195316485Sdavidcs#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
196316485Sdavidcs#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
197316485Sdavidcs	u64	page_size_caps;
198316485Sdavidcs	u8	dev_ack_delay;
199316485Sdavidcs	u32	reserved_lkey; /* Value of reserved L_key */
200316485Sdavidcs	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
201316485Sdavidcs	struct ecore_rdma_events events;
202316485Sdavidcs};
203316485Sdavidcs
/* Logical link state of the RDMA port. */
enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

/* Supported RoCE protocol versions; bit-flags, may be OR-ed together. */
enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

214316485Sdavidcsstruct ecore_rdma_port {
215316485Sdavidcs	enum ecore_port_state port_state;
216316485Sdavidcs	int	link_speed;
217316485Sdavidcs	u64	max_msg_size;
218316485Sdavidcs	u8	source_gid_table_len;
219316485Sdavidcs	void	*source_gid_table_ptr;
220316485Sdavidcs	u8	pkey_table_len;
221316485Sdavidcs	void	*pkey_table_ptr;
222316485Sdavidcs	u32	pkey_bad_counter;
223316485Sdavidcs	enum ecore_roce_capability capability;
224316485Sdavidcs};
225316485Sdavidcs
226316485Sdavidcsstruct ecore_rdma_cnq_params
227316485Sdavidcs{
228316485Sdavidcs	u8  num_pbl_pages; /* Number of pages in the PBL allocated
229316485Sdavidcs				   * for this queue
230316485Sdavidcs				   */
231316485Sdavidcs	u64 pbl_ptr; /* Address to the first entry of the queue PBL */
232316485Sdavidcs};
233316485Sdavidcs
/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};

242316485Sdavidcsstruct ecore_roce_dcqcn_params {
243316485Sdavidcs	u8	notification_point;
244316485Sdavidcs	u8	reaction_point;
245316485Sdavidcs
246316485Sdavidcs	/* fields for notification point */
247316485Sdavidcs	u32	cnp_send_timeout;
248316485Sdavidcs
249316485Sdavidcs	/* fields for reaction point */
250316485Sdavidcs	u32	rl_bc_rate;  /* Byte Counter Limit. */
251316485Sdavidcs	u16	rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
252316485Sdavidcs	u16	rl_r_ai;     /* Active increase rate */
253316485Sdavidcs	u16	rl_r_hai;    /* Hyper active increase rate */
254316485Sdavidcs	u16	dcqcn_g;     /* Alpha update gain in 1/64K resolution */
255316485Sdavidcs	u32	dcqcn_k_us;  /* Alpha update interval */
256316485Sdavidcs	u32	dcqcn_timeout_us;
257316485Sdavidcs};
258316485Sdavidcs
#ifdef CONFIG_ECORE_IWARP

/* MPA RTR (ready-to-receive) negotiation types; bit-flags. */
#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

/* MPA protocol revision negotiated during connection setup. */
enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

/* iWARP-specific start-up parameters. */
struct ecore_iwarp_params {
	u32				rcv_wnd_size;    /* TCP receive window size */
	u16				ooo_num_rx_bufs; /* buffers for out-of-order rx */
#define ECORE_IWARP_TS_EN (1 << 0) /* enable TCP timestamps */
#define ECORE_IWARP_DA_EN (1 << 1) /* enable delayed ack */
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif

285316485Sdavidcsstruct ecore_roce_params {
286316485Sdavidcs	enum ecore_rdma_cq_mode		cq_mode;
287316485Sdavidcs	struct ecore_roce_dcqcn_params	dcqcn_params;
288316485Sdavidcs	u8				ll2_handle; /* required for UD QPs */
289316485Sdavidcs};
290316485Sdavidcs
291316485Sdavidcsstruct ecore_rdma_start_in_params {
292316485Sdavidcs	struct ecore_rdma_events	*events;
293316485Sdavidcs	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
294316485Sdavidcs	u8				desired_cnq;
295316485Sdavidcs	u16				max_mtu;
296316485Sdavidcs	u8				mac_addr[ETH_ALEN];
297316485Sdavidcs#ifdef CONFIG_ECORE_IWARP
298316485Sdavidcs	struct ecore_iwarp_params	iwarp;
299316485Sdavidcs#endif
300316485Sdavidcs	struct ecore_roce_params	roce;
301316485Sdavidcs};
302316485Sdavidcs
303316485Sdavidcsstruct ecore_rdma_add_user_out_params {
304316485Sdavidcs	/* output variables (given to miniport) */
305316485Sdavidcs	u16	dpi;
306316485Sdavidcs	u64	dpi_addr;
307316485Sdavidcs	u64	dpi_phys_addr;
308316485Sdavidcs	u32	dpi_size;
309316485Sdavidcs	u16	wid_count;
310316485Sdavidcs};
311316485Sdavidcs
312316485Sdavidcs/*Returns the CQ CID or zero in case of failure */
313316485Sdavidcsstruct ecore_rdma_create_cq_in_params {
314316485Sdavidcs	/* input variables (given by miniport) */
315316485Sdavidcs	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
316316485Sdavidcs	u32	cq_handle_hi;
317316485Sdavidcs	u32	cq_size;
318316485Sdavidcs	u16	dpi;
319316485Sdavidcs	bool	pbl_two_level;
320316485Sdavidcs	u64	pbl_ptr;
321316485Sdavidcs	u16	pbl_num_pages;
322316485Sdavidcs	u8	pbl_page_size_log; /* for the pages that contain the
323316485Sdavidcs			   * pointers to the CQ pages
324316485Sdavidcs			   */
325316485Sdavidcs	u8	cnq_id;
326316485Sdavidcs	u16	int_timeout;
327316485Sdavidcs};
328316485Sdavidcs
329316485Sdavidcs
330316485Sdavidcsstruct ecore_rdma_resize_cq_in_params {
331316485Sdavidcs	/* input variables (given by miniport) */
332316485Sdavidcs
333316485Sdavidcs	u16	icid;
334316485Sdavidcs	u32	cq_size;
335316485Sdavidcs	bool	pbl_two_level;
336316485Sdavidcs	u64	pbl_ptr;
337316485Sdavidcs	u16	pbl_num_pages;
338316485Sdavidcs	u8	pbl_page_size_log; /* for the pages that contain the
339316485Sdavidcs		       * pointers to the CQ pages
340316485Sdavidcs		       */
341316485Sdavidcs};
342316485Sdavidcs
343316485Sdavidcs
/* RoCE transport flavor of a QP: v1 (L2) or v2 over IPv4/IPv6. */
enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

352316485Sdavidcsstruct ecore_rdma_create_qp_in_params {
353316485Sdavidcs	/* input variables (given by miniport) */
354316485Sdavidcs	u32	qp_handle_lo; /* QP handle to be written in CQE */
355316485Sdavidcs	u32	qp_handle_hi;
356316485Sdavidcs	u32	qp_handle_async_lo; /* QP handle to be written in async event */
357316485Sdavidcs	u32	qp_handle_async_hi;
358316485Sdavidcs	bool	use_srq;
359316485Sdavidcs	bool	signal_all;
360316485Sdavidcs	bool	fmr_and_reserved_lkey;
361316485Sdavidcs	u16	pd;
362316485Sdavidcs	u16	dpi;
363316485Sdavidcs	u16	sq_cq_id;
364316485Sdavidcs	u16	sq_num_pages;
365316485Sdavidcs	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
366316485Sdavidcs	u8	max_sq_sges;
367316485Sdavidcs	u16	rq_cq_id;
368316485Sdavidcs	u16	rq_num_pages;
369316485Sdavidcs	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
370316485Sdavidcs	u16	srq_id;
371316485Sdavidcs	u8	stats_queue;
372316485Sdavidcs};
373316485Sdavidcs
374316485Sdavidcsstruct ecore_rdma_create_qp_out_params {
375316485Sdavidcs	/* output variables (given to miniport) */
376316485Sdavidcs	u32		qp_id;
377316485Sdavidcs	u16		icid;
378316485Sdavidcs	void		*rq_pbl_virt;
379316485Sdavidcs	dma_addr_t	rq_pbl_phys;
380316485Sdavidcs	void		*sq_pbl_virt;
381316485Sdavidcs	dma_addr_t	sq_pbl_phys;
382316485Sdavidcs};
383316485Sdavidcs
384316485Sdavidcsstruct ecore_rdma_destroy_cq_in_params {
385316485Sdavidcs	/* input variables (given by miniport) */
386316485Sdavidcs	u16 icid;
387316485Sdavidcs};
388316485Sdavidcs
389316485Sdavidcsstruct ecore_rdma_destroy_cq_out_params {
390316485Sdavidcs	/* output variables, provided to the upper layer */
391316485Sdavidcs
392316485Sdavidcs	/* Sequence number of completion notification sent for the CQ on
393316485Sdavidcs	 * the associated CNQ
394316485Sdavidcs	 */
395316485Sdavidcs	u16	num_cq_notif;
396316485Sdavidcs};
397316485Sdavidcs
398316485Sdavidcs/* ECORE GID can be used as IPv4/6 address in RoCE v2 */
399316485Sdavidcsunion ecore_gid {
400316485Sdavidcs	u8 bytes[16];
401316485Sdavidcs	u16 words[8];
402316485Sdavidcs	u32 dwords[4];
403316485Sdavidcs	u64 qwords[2];
404316485Sdavidcs	u32 ipv4_addr;
405316485Sdavidcs};
406316485Sdavidcs
407316485Sdavidcsstruct ecore_rdma_modify_qp_in_params {
408316485Sdavidcs	/* input variables (given by miniport) */
409316485Sdavidcs	u32		modify_flags;
410316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
411316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
412316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
413316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
414316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
415316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
416316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
417316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
418316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
419316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
420316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
421316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
422316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
423316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
424316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
425316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
426316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
427316485Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
428316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
429316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
430316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
431316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
432316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
433316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
434316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
435316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
436316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
437316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
438316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
439316485Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14
440316485Sdavidcs
441316485Sdavidcs	enum ecore_roce_qp_state	new_state;
442316485Sdavidcs	u16		pkey;
443316485Sdavidcs	bool		incoming_rdma_read_en;
444316485Sdavidcs	bool		incoming_rdma_write_en;
445316485Sdavidcs	bool		incoming_atomic_en;
446316485Sdavidcs	bool		e2e_flow_control_en;
447316485Sdavidcs	u32		dest_qp;
448316485Sdavidcs	u16		mtu;
449316485Sdavidcs	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
450316485Sdavidcs	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
451316485Sdavidcs	u32		flow_label; /* ignored in IPv4 */
452316485Sdavidcs	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
453316485Sdavidcs	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
454316485Sdavidcs	u16		udp_src_port; /* RoCEv2 only */
455316485Sdavidcs
456316485Sdavidcs	u16		vlan_id;
457316485Sdavidcs
458316485Sdavidcs	u32		rq_psn;
459316485Sdavidcs	u32		sq_psn;
460316485Sdavidcs	u8		max_rd_atomic_resp;
461316485Sdavidcs	u8		max_rd_atomic_req;
462316485Sdavidcs	u32		ack_timeout;
463316485Sdavidcs	u8		retry_cnt;
464316485Sdavidcs	u8		rnr_retry_cnt;
465316485Sdavidcs	u8		min_rnr_nak_timer;
466316485Sdavidcs	bool		sqd_async;
467316485Sdavidcs	u8		remote_mac_addr[6];
468316485Sdavidcs	u8		local_mac_addr[6];
469316485Sdavidcs	bool		use_local_mac;
470316485Sdavidcs	enum roce_mode	roce_mode;
471316485Sdavidcs};
472316485Sdavidcs
473316485Sdavidcsstruct ecore_rdma_query_qp_out_params {
474316485Sdavidcs	/* output variables (given to miniport) */
475316485Sdavidcs	enum ecore_roce_qp_state	state;
476316485Sdavidcs	u32		rq_psn; /* responder */
477316485Sdavidcs	u32		sq_psn; /* requester */
478316485Sdavidcs	bool		draining; /* send queue is draining */
479316485Sdavidcs	u16		mtu;
480316485Sdavidcs	u32		dest_qp;
481316485Sdavidcs	bool		incoming_rdma_read_en;
482316485Sdavidcs	bool		incoming_rdma_write_en;
483316485Sdavidcs	bool		incoming_atomic_en;
484316485Sdavidcs	bool		e2e_flow_control_en;
485316485Sdavidcs	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
486316485Sdavidcs	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
487316485Sdavidcs	u32		flow_label; /* ignored in IPv4 */
488316485Sdavidcs	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
489316485Sdavidcs	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
490316485Sdavidcs	u32		timeout;
491316485Sdavidcs	u8		rnr_retry;
492316485Sdavidcs	u8		retry_cnt;
493316485Sdavidcs	u8		min_rnr_nak_timer;
494316485Sdavidcs	u16		pkey_index;
495316485Sdavidcs	u8		max_rd_atomic;
496316485Sdavidcs	u8		max_dest_rd_atomic;
497316485Sdavidcs	bool		sqd_async;
498316485Sdavidcs};
499316485Sdavidcs
500316485Sdavidcsstruct ecore_rdma_register_tid_in_params {
501316485Sdavidcs	/* input variables (given by miniport) */
502316485Sdavidcs	u32	itid; /* index only, 18 bit long, lkey = itid << 8 | key */
503316485Sdavidcs	enum ecore_rdma_tid_type tid_type;
504316485Sdavidcs	u8	key;
505316485Sdavidcs	u16	pd;
506316485Sdavidcs	bool	local_read;
507316485Sdavidcs	bool	local_write;
508316485Sdavidcs	bool	remote_read;
509316485Sdavidcs	bool	remote_write;
510316485Sdavidcs	bool	remote_atomic;
511316485Sdavidcs	bool	mw_bind;
512316485Sdavidcs	u64	pbl_ptr;
513316485Sdavidcs	bool	pbl_two_level;
514316485Sdavidcs	u8	pbl_page_size_log; /* for the pages that contain the pointers
515316485Sdavidcs		       * to the MR pages
516316485Sdavidcs		       */
517316485Sdavidcs	u8	page_size_log; /* for the MR pages */
518316485Sdavidcs	u32	fbo;
519316485Sdavidcs	u64	length; /* only lower 40 bits are valid */
520316485Sdavidcs	u64	vaddr;
521316485Sdavidcs	bool	zbva;
522316485Sdavidcs	bool	phy_mr;
523316485Sdavidcs	bool	dma_mr;
524316485Sdavidcs
525316485Sdavidcs	/* DIF related fields */
526316485Sdavidcs	bool	dif_enabled;
527316485Sdavidcs	u64	dif_error_addr;
528316485Sdavidcs	u64	dif_runt_addr;
529316485Sdavidcs};
530316485Sdavidcs
531316485Sdavidcsstruct ecore_rdma_create_srq_in_params	{
532316485Sdavidcs	u64 pbl_base_addr;
533316485Sdavidcs	u64 prod_pair_addr;
534316485Sdavidcs	u16 num_pages;
535316485Sdavidcs	u16 pd_id;
536316485Sdavidcs	u16 page_size;
537316485Sdavidcs};
538316485Sdavidcs
539316485Sdavidcsstruct ecore_rdma_create_srq_out_params {
540316485Sdavidcs	u16 srq_id;
541316485Sdavidcs};
542316485Sdavidcs
543316485Sdavidcsstruct ecore_rdma_destroy_srq_in_params {
544316485Sdavidcs	u16 srq_id;
545316485Sdavidcs};
546316485Sdavidcs
547316485Sdavidcsstruct ecore_rdma_modify_srq_in_params {
548316485Sdavidcs	u32 wqe_limit;
549316485Sdavidcs	u16 srq_id;
550316485Sdavidcs};
551316485Sdavidcs
552316485Sdavidcsstruct ecore_rdma_resize_cq_out_params {
553316485Sdavidcs	/* output variables, provided to the upper layer */
554316485Sdavidcs	u32 prod; /* CQ producer value on old PBL */
555316485Sdavidcs	u32 cons; /* CQ consumer value on old PBL */
556316485Sdavidcs};
557316485Sdavidcs
558316485Sdavidcsstruct ecore_rdma_resize_cnq_in_params {
559316485Sdavidcs	/* input variables (given by miniport) */
560316485Sdavidcs	u32	cnq_id;
561316485Sdavidcs	u32	pbl_page_size_log; /* for the pages that contain the
562316485Sdavidcs			* pointers to the cnq pages
563316485Sdavidcs			*/
564316485Sdavidcs	u64	pbl_ptr;
565316485Sdavidcs};
566316485Sdavidcs
567316485Sdavidcsstruct ecore_rdma_stats_out_params {
568316485Sdavidcs	u64	sent_bytes;
569316485Sdavidcs	u64	sent_pkts;
570316485Sdavidcs	u64	rcv_bytes;
571316485Sdavidcs	u64	rcv_pkts;
572316485Sdavidcs
573316485Sdavidcs	/* RoCE only */
574316485Sdavidcs	u64	icrc_errors;		/* wraps at 32 bits */
575316485Sdavidcs	u64	retransmit_events;	/* wraps at 32 bits */
576316485Sdavidcs	u64	silent_drops;		/* wraps at 16 bits */
577316485Sdavidcs	u64	rnr_nacks_sent;		/* wraps at 16 bits */
578316485Sdavidcs
579316485Sdavidcs	/* iWARP only */
580316485Sdavidcs	u64	iwarp_tx_fast_rxmit_cnt;
581316485Sdavidcs	u64	iwarp_tx_slow_start_cnt;
582316485Sdavidcs	u64	unalign_rx_comp;
583316485Sdavidcs};
584316485Sdavidcs
585316485Sdavidcsstruct ecore_rdma_counters_out_params {
586316485Sdavidcs	u64	pd_count;
587316485Sdavidcs	u64	max_pd;
588316485Sdavidcs	u64	dpi_count;
589316485Sdavidcs	u64	max_dpi;
590316485Sdavidcs	u64	cq_count;
591316485Sdavidcs	u64	max_cq;
592316485Sdavidcs	u64	qp_count;
593316485Sdavidcs	u64	max_qp;
594316485Sdavidcs	u64	tid_count;
595316485Sdavidcs	u64	max_tid;
596316485Sdavidcs};
597316485Sdavidcs
598316485Sdavidcsenum _ecore_status_t
599316485Sdavidcsecore_rdma_add_user(void *rdma_cxt,
600316485Sdavidcs		    struct ecore_rdma_add_user_out_params *out_params);
601316485Sdavidcs
602316485Sdavidcsenum _ecore_status_t
603316485Sdavidcsecore_rdma_alloc_pd(void *rdma_cxt,
604316485Sdavidcs		    u16	*pd);
605316485Sdavidcs
606316485Sdavidcsenum _ecore_status_t
607316485Sdavidcsecore_rdma_alloc_tid(void *rdma_cxt,
608316485Sdavidcs		     u32 *tid);
609316485Sdavidcs
610316485Sdavidcsenum _ecore_status_t
611316485Sdavidcsecore_rdma_create_cq(void *rdma_cxt,
612316485Sdavidcs		     struct ecore_rdma_create_cq_in_params *params,
613316485Sdavidcs		     u16 *icid);
614316485Sdavidcs
615316485Sdavidcs/* Returns a pointer to the responders' CID, which is also a pointer to the
616316485Sdavidcs * ecore_qp_params struct. Returns NULL in case of failure.
617316485Sdavidcs */
618316485Sdavidcsstruct ecore_rdma_qp*
619316485Sdavidcsecore_rdma_create_qp(void *rdma_cxt,
620316485Sdavidcs		     struct ecore_rdma_create_qp_in_params  *in_params,
621316485Sdavidcs		     struct ecore_rdma_create_qp_out_params *out_params);
622316485Sdavidcs
623316485Sdavidcsenum _ecore_status_t
624316485Sdavidcsecore_roce_create_ud_qp(void *rdma_cxt,
625316485Sdavidcs			struct ecore_rdma_create_qp_out_params *out_params);
626316485Sdavidcs
627316485Sdavidcsenum _ecore_status_t
628316485Sdavidcsecore_rdma_deregister_tid(void *rdma_cxt,
629316485Sdavidcs			  u32		tid);
630316485Sdavidcs
631316485Sdavidcsenum _ecore_status_t
632316485Sdavidcsecore_rdma_destroy_cq(void *rdma_cxt,
633316485Sdavidcs		      struct ecore_rdma_destroy_cq_in_params  *in_params,
634316485Sdavidcs		      struct ecore_rdma_destroy_cq_out_params *out_params);
635316485Sdavidcs
636316485Sdavidcsenum _ecore_status_t
637316485Sdavidcsecore_rdma_destroy_qp(void *rdma_cxt,
638316485Sdavidcs		      struct ecore_rdma_qp *qp);
639316485Sdavidcs
640316485Sdavidcsenum _ecore_status_t
641316485Sdavidcsecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);
642316485Sdavidcs
643316485Sdavidcsvoid
644316485Sdavidcsecore_rdma_free_pd(void *rdma_cxt,
645316485Sdavidcs		   u16	pd);
646316485Sdavidcs
/*
 * RDMA public API (common to RoCE and iWARP) — prototypes only.
 * 'rdma_cxt' is the opaque per-hwfn RDMA context handed out by the
 * start/add-user path. NOTE(review): ownership and locking rules are not
 * visible in this header — confirm against the ecore implementation.
 */

/* Release a memory-registration TID back to the device.
 * Presumably pairs with an alloc-tid routine — confirm. */
void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

/* Modify/transition a QP according to 'params'; returns an ecore status. */
enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

/* Query static device attributes. Returns a pointer into the ecore
 * context (NOTE(review): caller presumably must not free — confirm). */
struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

/* Query per-port attributes (same ownership caveat as query_device). */
struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

/* Fill 'out_params' with the current state/attributes of 'qp'. */
enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

/* Register a memory region described by 'params' under its TID. */
enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

/* Release the per-user DPI (doorbell page index) 'dpi'. */
void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

/* Resize a CNQ (completion notification queue). */
enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

/* Resize an existing CQ; results are reported through 'out_params'.
 * NOTE(review): the previous comment here ("Returns the CQ CID or zero in
 * case of failure") contradicted the status-code return type and likely
 * belonged to a create-CQ routine — confirm against the implementation. */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling rdma_start upper layer (VBD/qed) should fill the
 * page-size and mtu in hwfn context
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);

/* Stop the RDMA engine; counterpart of ecore_rdma_start. */
enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

/* Read HW statistics for 'stats_queue' into 'out_parms'. */
enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_parms);

/* Read protocol/error counters into 'out_parms'. */
enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_parms);

/* Translate a relative status-block id to its absolute id — confirm. */
u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);

/* Query the CAU (coalescing aggregation unit) timer resolution. */
u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);

/* Advance the producer index of CNQ 'cnq_index' to 'prod'. */
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

/* Free RDMA-related resources held by the hwfn. */
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);
709316485Sdavidcs
710316485Sdavidcs#ifdef CONFIG_ECORE_IWARP
711316485Sdavidcs
712316485Sdavidcs/* iWARP API */
713316485Sdavidcs
714316485Sdavidcs
/* Connection-manager and async events delivered to the upper layer
 * through the registered iwarp_event_handler callback. */
enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * ( ack on mpa response )
					     */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	ECORE_IWARP_EVENT_IRQ_FULL,   /* IRQ = inbound-read queue — confirm */
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT, /* Lower-layer (TCP) timeout */
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};
734316485Sdavidcs
/* IP version of the TCP connection underlying an iWARP endpoint. */
enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};
740316485Sdavidcs
/* Connection-manager tuple plus MPA private data for one connection. */
struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];	/* IPv4 presumably uses one word only;
				 * byte order not stated here — confirm */
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;		/* VLAN id; 0 presumably untagged — confirm */
	const void *private_data; /* MPA private data; lifetime/ownership
				   * not stated in this header — confirm */
	u16 private_data_len;
	u8 ord;			/* MPA ORD: outbound RDMA-read queue depth */
	u8 ird;			/* MPA IRD: inbound RDMA-read queue depth */
};
753316485Sdavidcs
/* Event payload handed to the upper layer's iwarp_event_handler. */
struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;	  /* NOTE(review): presumably 0 = success — confirm */
};

/* Upper-layer callback invoked per CM/async event; 'context' is the
 * cb_context registered at connect/listen/accept time. */
typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);
763316485Sdavidcs
/* Active Side Connect Flow:
 * upper layer driver calls ecore_iwarp_connect
 * Function is blocking: i.e. returns after tcp connection is established
 * After MPA connection is established ECORE_IWARP_EVENT_ACTIVE_COMPLETE event
 * will be passed to upperlayer driver using the event_cb passed in
 * ecore_iwarp_connect_in. Information of the established connection will be
 * initialized in event data.
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb; /* Delivers events for this endpoint */
	void *cb_context;	      /* Passed back to event_cb */
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;		      /* TCP maximum segment size */
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

/* Output of ecore_iwarp_connect. */
struct ecore_iwarp_connect_out {
	void *ep_context; /* Endpoint handle for subsequent EP calls */
};
785316485Sdavidcs
/* Passive side connect flow:
 * upper layer driver calls ecore_iwarp_create_listen
 * once Syn packet that matches a ip/port that is listened on arrives, ecore
 * will offload the tcp connection. After MPA Request is received on the
 * offload connection, the event ECORE_IWARP_EVENT_MPA_REQUEST will be sent
 * to upper layer driver using the event_cb passed below. The event data
 * will be placed in event parameter. After upper layer driver processes the
 * event, ecore_iwarp_accept or ecore_iwarp_reject should be called to continue
 * MPA negotiation. Once negotiation is complete the event
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE will be passed to the event_cb passed
 * originally in ecore_iwarp_listen_in structure.
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];	 /* Local address to listen on; same layout caveat
			  * as ecore_iwarp_cm_info addresses — confirm */
	u16 port;
	u16 vlan;
};

/* Output of ecore_iwarp_create_listen. */
struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};
811316485Sdavidcs
/* Input for ecore_iwarp_accept — continues passive-side MPA negotiation. */
struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data; /* MPA private data for the reply */
	u16 private_data_len;
	u8 ord;		  /* MPA ORD: outbound RDMA-read queue depth */
	u8 ird;		  /* MPA IRD: inbound RDMA-read queue depth */
};

/* Input for ecore_iwarp_reject — declines a pending MPA request. */
struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data; /* Optional MPA private data in the reject */
	u16 private_data_len;
};

/* Input for ecore_iwarp_send_rtr (MPA ready-to-receive — confirm). */
struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

/* Input for ecore_iwarp_tcp_abort. */
struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};
836316485Sdavidcs
837316485Sdavidcs
/* Active-side connect; blocking until the TCP connection is established.
 * MPA completion is reported later via ECORE_IWARP_EVENT_ACTIVE_COMPLETE. */
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

/* Create a passive-side listener; oparams->handle is the destroy token. */
enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

/* Accept a pending MPA request (continues passive-side negotiation). */
enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

/* Reject a pending MPA request, optionally carrying private data. */
enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

/* Tear down a listener created by ecore_iwarp_create_listen. */
enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

/* Send an MPA RTR on the endpoint — confirm exact semantics. */
enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

/* Abort the endpoint's underlying TCP connection. */
enum _ecore_status_t
ecore_iwarp_tcp_abort(void *rdma_cxt, struct ecore_iwarp_tcp_abort_in *iparams);
864316485Sdavidcs
865316485Sdavidcs#endif /* CONFIG_ECORE_IWARP */
866316485Sdavidcs
867316485Sdavidcs#endif
868