/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/qlnx/qlnxe/ecore_rdma_api.h 337519 2018-08-09 01:39:47Z davidcs $
 */

30336695Sdavidcs#ifndef __ECORE_RDMA_API_H__
31336695Sdavidcs#define __ECORE_RDMA_API_H__
32336695Sdavidcs
33336695Sdavidcs#ifndef LINUX_REMOVE
34336695Sdavidcs#ifndef ETH_ALEN
35336695Sdavidcs#define ETH_ALEN 6
36336695Sdavidcs#endif
37336695Sdavidcs#endif
38336695Sdavidcs
39336695Sdavidcs#ifndef __EXTRACT__LINUX__
40336695Sdavidcs
/* Destination selector for Light L2 (LL2) transmit on RoCE. */
enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};

/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size Limitation
 * The CNQ size should be set as twice the amount of CQs, since for each CQ one
 * element may be inserted into the CNQ and another element is used per CQ to
 * accommodate for a possible race in the arm mechanism.
 * The FW supports a CNQ of 64k-1 and this apparently causes an issue - notice
 * that the number of QPs can reach 32k giving 64k CQs and 128k CNQ elements.
 * Luckily the FW can buffer CNQ elements avoiding an overflow, on the expense
 * of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */

/* rdma interface */

/* RoCE QP states, mirroring the standard InfiniBand QP state machine. */
enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

/* QP transport types: reliable-connected and XRC initiator/target. */
enum ecore_rdma_qp_type {
	ECORE_RDMA_QP_TYPE_RC,
	ECORE_RDMA_QP_TYPE_XRC_INI,
	ECORE_RDMA_QP_TYPE_XRC_TGT,
	ECORE_RDMA_QP_TYPE_INVAL = 0xffff, /* sentinel: not a valid QP type */
};

/* Kind of object a TID (translation ID) registration describes. */
enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR, /* regular memory region */
	ECORE_RDMA_TID_FMR,           /* fast memory region */
	ECORE_RDMA_TID_MW_TYPE1,      /* type-1 memory window */
	ECORE_RDMA_TID_MW_TYPE2A      /* type-2A memory window */
};

87336695Sdavidcstypedef
88336695Sdavidcsvoid (*affiliated_event_t)(void	*context,
89336695Sdavidcs			   u8	fw_event_code,
90336695Sdavidcs			   void	*fw_handle);
91336695Sdavidcs
92336695Sdavidcstypedef
93336695Sdavidcsvoid (*unaffiliated_event_t)(void *context,
94336695Sdavidcs			     u8   event_code);
95336695Sdavidcs
96336695Sdavidcsstruct ecore_rdma_events {
97336695Sdavidcs	void			*context;
98336695Sdavidcs	affiliated_event_t	affiliated_event;
99336695Sdavidcs	unaffiliated_event_t	unaffiliated_event;
100336695Sdavidcs};
101336695Sdavidcs
102336695Sdavidcsstruct ecore_rdma_device {
103336695Sdavidcs    /* Vendor specific information */
104336695Sdavidcs	u32	vendor_id;
105336695Sdavidcs	u32	vendor_part_id;
106336695Sdavidcs	u32	hw_ver;
107336695Sdavidcs	u64	fw_ver;
108336695Sdavidcs
109336695Sdavidcs	u64	node_guid; /* node GUID */
110336695Sdavidcs	u64	sys_image_guid; /* System image GUID */
111336695Sdavidcs
112336695Sdavidcs	u8	max_cnq;
113336695Sdavidcs	u8	max_sge; /* The maximum number of scatter/gather entries
114336695Sdavidcs			  * per Work Request supported
115336695Sdavidcs			  */
116336695Sdavidcs	u8	max_srq_sge; /* The maximum number of scatter/gather entries
117336695Sdavidcs			      * per Work Request supported for SRQ
118336695Sdavidcs			      */
119336695Sdavidcs	u16	max_inline;
120336695Sdavidcs	u32	max_wqe; /* The maximum number of outstanding work
121336695Sdavidcs			  * requests on any Work Queue supported
122336695Sdavidcs			  */
123336695Sdavidcs	u32	max_srq_wqe; /* The maximum number of outstanding work
124336695Sdavidcs			      * requests on any Work Queue supported for SRQ
125336695Sdavidcs			      */
126336695Sdavidcs	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
127336695Sdavidcs					     * & atomic operation that can be
128336695Sdavidcs					     * outstanding per QP
129336695Sdavidcs					     */
130336695Sdavidcs
131336695Sdavidcs	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
132336695Sdavidcs					    * initiation of RDMA Read
133336695Sdavidcs					    * & atomic operations
134336695Sdavidcs					    */
135336695Sdavidcs	u64	max_dev_resp_rd_atomic_resc;
136336695Sdavidcs	u32	max_cq;
137336695Sdavidcs	u32	max_qp;
138336695Sdavidcs	u32	max_srq; /* Maximum number of SRQs */
139336695Sdavidcs	u32	max_mr; /* Maximum number of MRs supported by this device */
140336695Sdavidcs	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
141336695Sdavidcs			      * block that can be registered by this device
142336695Sdavidcs			      */
143336695Sdavidcs	u32	max_cqe;
144336695Sdavidcs	u32	max_mw; /* The maximum number of memory windows supported */
145336695Sdavidcs	u32	max_fmr;
146336695Sdavidcs	u32	max_mr_mw_fmr_pbl;
147336695Sdavidcs	u64	max_mr_mw_fmr_size;
148336695Sdavidcs	u32	max_pd; /* The maximum number of protection domains supported */
149336695Sdavidcs	u32	max_ah;
150336695Sdavidcs	u8	max_pkey;
151336695Sdavidcs	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
152336695Sdavidcs	u8	max_stats_queues; /* Maximum number of statistics queues */
153336695Sdavidcs	u32	dev_caps;
154336695Sdavidcs
155336695Sdavidcs	/* Abilty to support RNR-NAK generation */
156336695Sdavidcs
157336695Sdavidcs#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
158336695Sdavidcs#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
159336695Sdavidcs	/* Abilty to support shutdown port */
160336695Sdavidcs#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
161336695Sdavidcs#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
162336695Sdavidcs	/* Abilty to support port active event */
163336695Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
164336695Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
165336695Sdavidcs	/* Abilty to support port change event */
166336695Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
167336695Sdavidcs#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
168336695Sdavidcs	/* Abilty to support system image GUID */
169336695Sdavidcs#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
170336695Sdavidcs#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
171336695Sdavidcs	/* Abilty to support bad P_Key counter support */
172336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
173336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
174336695Sdavidcs	/* Abilty to support atomic operations */
175336695Sdavidcs#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
176336695Sdavidcs#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
177336695Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
178336695Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
179336695Sdavidcs	/* Abilty to support modifying the maximum number of
180336695Sdavidcs	 * outstanding work requests per QP
181336695Sdavidcs	 */
182336695Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
183336695Sdavidcs#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
184336695Sdavidcs	/* Abilty to support automatic path migration */
185336695Sdavidcs#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
186336695Sdavidcs#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
187336695Sdavidcs	/* Abilty to support the base memory management extensions */
188336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
189336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
190336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
191336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
192336695Sdavidcs	/* Abilty to support multipile page sizes per memory region */
193336695Sdavidcs#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
194336695Sdavidcs#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
195336695Sdavidcs	/* Abilty to support block list physical buffer list */
196336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
197336695Sdavidcs#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
198336695Sdavidcs	/* Abilty to support zero based virtual addresses */
199336695Sdavidcs#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
200336695Sdavidcs#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
201336695Sdavidcs	/* Abilty to support local invalidate fencing */
202336695Sdavidcs#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
203336695Sdavidcs#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
204336695Sdavidcs	/* Abilty to support Loopback on QP */
205336695Sdavidcs#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
206336695Sdavidcs#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
207336695Sdavidcs	u64	page_size_caps;
208336695Sdavidcs	u8	dev_ack_delay;
209336695Sdavidcs	u32	reserved_lkey; /* Value of reserved L_key */
210336695Sdavidcs	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
211336695Sdavidcs	struct ecore_rdma_events events;
212336695Sdavidcs};
213336695Sdavidcs
/* RDMA port link state. */
enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

/* RoCE protocol version support flags (bitmask-capable values). */
enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

224336695Sdavidcsstruct ecore_rdma_port {
225336695Sdavidcs	enum ecore_port_state port_state;
226336695Sdavidcs	int	link_speed;
227336695Sdavidcs	u64	max_msg_size;
228336695Sdavidcs	u8	source_gid_table_len;
229336695Sdavidcs	void	*source_gid_table_ptr;
230336695Sdavidcs	u8	pkey_table_len;
231336695Sdavidcs	void	*pkey_table_ptr;
232336695Sdavidcs	u32	pkey_bad_counter;
233336695Sdavidcs	enum ecore_roce_capability capability;
234336695Sdavidcs};
235336695Sdavidcs
236336695Sdavidcsstruct ecore_rdma_cnq_params
237336695Sdavidcs{
238336695Sdavidcs	u8  num_pbl_pages; /* Number of pages in the PBL allocated
239336695Sdavidcs				   * for this queue
240336695Sdavidcs				   */
241336695Sdavidcs	u64 pbl_ptr; /* Address to the first entry of the queue PBL */
242336695Sdavidcs};
243336695Sdavidcs
/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};

252336695Sdavidcsstruct ecore_roce_dcqcn_params {
253336695Sdavidcs	u8	notification_point;
254336695Sdavidcs	u8	reaction_point;
255336695Sdavidcs
256336695Sdavidcs	/* fields for notification point */
257336695Sdavidcs	u32	cnp_send_timeout;
258336695Sdavidcs	u8	cnp_dscp;
259336695Sdavidcs	u8	cnp_vlan_priority;
260336695Sdavidcs
261336695Sdavidcs	/* fields for reaction point */
262336695Sdavidcs	u32	rl_bc_rate;  /* Byte Counter Limit. */
263336695Sdavidcs	u32	rl_max_rate; /* Maximum rate in Mbps resolution */
264336695Sdavidcs	u32	rl_r_ai;     /* Active increase rate */
265336695Sdavidcs	u32	rl_r_hai;    /* Hyper active increase rate */
266336695Sdavidcs	u32	dcqcn_gd;    /* Alpha denominator */
267336695Sdavidcs	u32	dcqcn_k_us;  /* Alpha update interval */
268336695Sdavidcs	u32	dcqcn_timeout_us;
269336695Sdavidcs};
270336695Sdavidcs
271336695Sdavidcsstruct ecore_rdma_glob_cfg {
272336695Sdavidcs	/* global tunables affecting all QPs created after they are
273336695Sdavidcs	 * set.
274336695Sdavidcs	 */
275336695Sdavidcs	u8 vlan_pri_en;
276336695Sdavidcs	u8 vlan_pri;
277336695Sdavidcs	u8 ecn_en;
278336695Sdavidcs	u8 ecn;
279336695Sdavidcs	u8 dscp_en;
280336695Sdavidcs	u8 dscp;
281336695Sdavidcs};
282336695Sdavidcs
283336695Sdavidcs#ifndef LINUX_REMOVE
284336695Sdavidcs#define ECORE_RDMA_DCSP_BIT_MASK			0x01
285336695Sdavidcs#define ECORE_RDMA_DCSP_EN_BIT_MASK			0x02
286336695Sdavidcs#define ECORE_RDMA_ECN_BIT_MASK				0x04
287336695Sdavidcs#define ECORE_RDMA_ECN_EN_BIT_MASK			0x08
288336695Sdavidcs#define ECORE_RDMA_VLAN_PRIO_BIT_MASK		0x10
289336695Sdavidcs#define ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK	0x20
290336695Sdavidcs
291336695Sdavidcsenum _ecore_status_t
292336695Sdavidcsecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
293336695Sdavidcs			struct ecore_rdma_glob_cfg *in_params,
294336695Sdavidcs			u32 glob_cfg_bits);
295336695Sdavidcs
296336695Sdavidcsenum _ecore_status_t
297336695Sdavidcsecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
298336695Sdavidcs			struct ecore_rdma_glob_cfg *out_params);
299336695Sdavidcs#endif /* LINUX_REMOVE */
300336695Sdavidcs
301336695Sdavidcs#ifdef CONFIG_ECORE_IWARP
302336695Sdavidcs
/* Maximum backlog of pending connections on an iWARP listener. */
#define ECORE_IWARP_MAX_LIS_BACKLOG		(256)

/* MPA enhanced-connection-setup RTR (ready-to-receive) message types. */
#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

/* MPA (Marker PDU Aligned framing) protocol revision. */
enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

315336695Sdavidcsstruct ecore_iwarp_params {
316336695Sdavidcs	u32				rcv_wnd_size;
317336695Sdavidcs	u16				ooo_num_rx_bufs;
318336695Sdavidcs#define ECORE_IWARP_TS_EN (1 << 0)
319336695Sdavidcs#define ECORE_IWARP_DA_EN (1 << 1)
320336695Sdavidcs	u8				flags;
321336695Sdavidcs	u8				crc_needed;
322336695Sdavidcs	enum ecore_mpa_rev		mpa_rev;
323336695Sdavidcs	u8				mpa_rtr;
324336695Sdavidcs	u8				mpa_peer2peer;
325336695Sdavidcs};
326336695Sdavidcs
327336695Sdavidcs#endif
328336695Sdavidcs
329336695Sdavidcsstruct ecore_roce_params {
330336695Sdavidcs	enum ecore_rdma_cq_mode		cq_mode;
331336695Sdavidcs	struct ecore_roce_dcqcn_params	dcqcn_params;
332336695Sdavidcs	u8				ll2_handle; /* required for UD QPs */
333336695Sdavidcs};
334336695Sdavidcs
335336695Sdavidcsstruct ecore_rdma_start_in_params {
336336695Sdavidcs	struct ecore_rdma_events	*events;
337336695Sdavidcs	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
338336695Sdavidcs	u8				desired_cnq;
339336695Sdavidcs	u16				max_mtu;
340336695Sdavidcs	u8				mac_addr[ETH_ALEN];
341336695Sdavidcs#ifdef CONFIG_ECORE_IWARP
342336695Sdavidcs	struct ecore_iwarp_params	iwarp;
343336695Sdavidcs#endif
344336695Sdavidcs	struct ecore_roce_params	roce;
345336695Sdavidcs};
346336695Sdavidcs
347336695Sdavidcsstruct ecore_rdma_add_user_out_params {
348336695Sdavidcs	/* output variables (given to miniport) */
349336695Sdavidcs	u16	dpi;
350336695Sdavidcs	u64	dpi_addr;
351336695Sdavidcs	u64	dpi_phys_addr;
352336695Sdavidcs	u32	dpi_size;
353336695Sdavidcs	u16	wid_count;
354336695Sdavidcs};
355336695Sdavidcs
/* RoCE transport flavor: v1 (L2) or v2 over IPv4/IPv6 (UDP). */
enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

364336695Sdavidcs/* ECORE GID can be used as IPv4/6 address in RoCE v2 */
365336695Sdavidcsunion ecore_gid {
366336695Sdavidcs	u8 bytes[16];
367336695Sdavidcs	u16 words[8];
368336695Sdavidcs	u32 dwords[4];
369336695Sdavidcs	u64 qwords[2];
370336695Sdavidcs	u32 ipv4_addr;
371336695Sdavidcs};
372336695Sdavidcs
373336695Sdavidcsstruct ecore_rdma_register_tid_in_params {
374336695Sdavidcs	/* input variables (given by miniport) */
375336695Sdavidcs	u32	itid; /* index only, 18 bit long, lkey = itid << 8 | key */
376336695Sdavidcs	enum ecore_rdma_tid_type tid_type;
377336695Sdavidcs	u8	key;
378336695Sdavidcs	u16	pd;
379336695Sdavidcs	bool	local_read;
380336695Sdavidcs	bool	local_write;
381336695Sdavidcs	bool	remote_read;
382336695Sdavidcs	bool	remote_write;
383336695Sdavidcs	bool	remote_atomic;
384336695Sdavidcs	bool	mw_bind;
385336695Sdavidcs	u64	pbl_ptr;
386336695Sdavidcs	bool	pbl_two_level;
387336695Sdavidcs	u8	pbl_page_size_log; /* for the pages that contain the pointers
388336695Sdavidcs		       * to the MR pages
389336695Sdavidcs		       */
390336695Sdavidcs	u8	page_size_log; /* for the MR pages */
391336695Sdavidcs	u32	fbo;
392336695Sdavidcs	u64	length; /* only lower 40 bits are valid */
393336695Sdavidcs	u64	vaddr;
394336695Sdavidcs	bool	zbva;
395336695Sdavidcs	bool	phy_mr;
396336695Sdavidcs	bool	dma_mr;
397336695Sdavidcs
398336695Sdavidcs	/* DIF related fields */
399336695Sdavidcs	bool	dif_enabled;
400336695Sdavidcs	u64	dif_error_addr;
401336695Sdavidcs	u64	dif_runt_addr;
402336695Sdavidcs};
403336695Sdavidcs
404336695Sdavidcs/*Returns the CQ CID or zero in case of failure */
405336695Sdavidcsstruct ecore_rdma_create_cq_in_params {
406336695Sdavidcs	/* input variables (given by miniport) */
407336695Sdavidcs	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
408336695Sdavidcs	u32	cq_handle_hi;
409336695Sdavidcs	u32	cq_size;
410336695Sdavidcs	u16	dpi;
411336695Sdavidcs	bool	pbl_two_level;
412336695Sdavidcs	u64	pbl_ptr;
413336695Sdavidcs	u16	pbl_num_pages;
414336695Sdavidcs	u8	pbl_page_size_log; /* for the pages that contain the
415336695Sdavidcs			   * pointers to the CQ pages
416336695Sdavidcs			   */
417336695Sdavidcs	u8	cnq_id;
418336695Sdavidcs	u16	int_timeout;
419336695Sdavidcs};
420336695Sdavidcs
421336695Sdavidcsstruct ecore_rdma_create_srq_in_params	{
422336695Sdavidcs	u64 pbl_base_addr;
423336695Sdavidcs	u64 prod_pair_addr;
424336695Sdavidcs	u16 num_pages;
425336695Sdavidcs	u16 pd_id;
426336695Sdavidcs	u16 page_size;
427336695Sdavidcs
428336695Sdavidcs	/* XRC related only */
429336695Sdavidcs	bool is_xrc;
430336695Sdavidcs	u16 xrcd_id;
431336695Sdavidcs	u32 cq_cid;
432336695Sdavidcs	bool reserved_key_en;
433336695Sdavidcs};
434336695Sdavidcs
435336695Sdavidcsstruct ecore_rdma_destroy_cq_in_params {
436336695Sdavidcs	/* input variables (given by miniport) */
437336695Sdavidcs	u16 icid;
438336695Sdavidcs};
439336695Sdavidcs
440336695Sdavidcsstruct ecore_rdma_destroy_cq_out_params {
441336695Sdavidcs	/* output variables, provided to the upper layer */
442336695Sdavidcs
443336695Sdavidcs	/* Sequence number of completion notification sent for the CQ on
444336695Sdavidcs	 * the associated CNQ
445336695Sdavidcs	 */
446336695Sdavidcs	u16	num_cq_notif;
447336695Sdavidcs};
448336695Sdavidcs#endif
449336695Sdavidcs
450336695Sdavidcsstruct ecore_rdma_resize_cq_in_params {
451336695Sdavidcs	/* input variables (given by miniport) */
452336695Sdavidcs
453336695Sdavidcs	u16	icid;
454336695Sdavidcs	u32	cq_size;
455336695Sdavidcs	bool	pbl_two_level;
456336695Sdavidcs	u64	pbl_ptr;
457336695Sdavidcs	u16	pbl_num_pages;
458336695Sdavidcs	u8	pbl_page_size_log; /* for the pages that contain the
459336695Sdavidcs		       * pointers to the CQ pages
460336695Sdavidcs		       */
461336695Sdavidcs};
462336695Sdavidcs
463336695Sdavidcs#ifndef __EXTRACT__LINUX__
464336695Sdavidcs
465336695Sdavidcsstruct ecore_rdma_create_qp_in_params {
466336695Sdavidcs	/* input variables (given by miniport) */
467336695Sdavidcs	u32	qp_handle_lo; /* QP handle to be written in CQE */
468336695Sdavidcs	u32	qp_handle_hi;
469336695Sdavidcs	u32	qp_handle_async_lo; /* QP handle to be written in async event */
470336695Sdavidcs	u32	qp_handle_async_hi;
471336695Sdavidcs	bool	use_srq;
472336695Sdavidcs	bool	signal_all;
473336695Sdavidcs	bool	fmr_and_reserved_lkey;
474336695Sdavidcs	u16	pd;
475336695Sdavidcs	u16	dpi;
476336695Sdavidcs	u16	sq_cq_id;
477336695Sdavidcs	u16	sq_num_pages;
478336695Sdavidcs	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
479336695Sdavidcs	u8	max_sq_sges;
480336695Sdavidcs	u16	rq_cq_id;
481336695Sdavidcs	u16	rq_num_pages;
482336695Sdavidcs	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
483336695Sdavidcs	u16	srq_id;
484336695Sdavidcs	u8	stats_queue;
485336695Sdavidcs	enum	ecore_rdma_qp_type qp_type;
486336695Sdavidcs	u16	xrcd_id;
487336695Sdavidcs};
488336695Sdavidcs
489336695Sdavidcsstruct ecore_rdma_create_qp_out_params {
490336695Sdavidcs	/* output variables (given to miniport) */
491336695Sdavidcs	u32		qp_id;
492336695Sdavidcs	u16		icid;
493336695Sdavidcs	void		*rq_pbl_virt;
494336695Sdavidcs	dma_addr_t	rq_pbl_phys;
495336695Sdavidcs	void		*sq_pbl_virt;
496336695Sdavidcs	dma_addr_t	sq_pbl_phys;
497336695Sdavidcs};
498336695Sdavidcs
499336695Sdavidcsstruct ecore_rdma_modify_qp_in_params {
500336695Sdavidcs	/* input variables (given by miniport) */
501336695Sdavidcs	u32		modify_flags;
502336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
503336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
504336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
505336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
506336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
507336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
508336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
509336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
510336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
511336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
512336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
513336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
514336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
515336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
516336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
517336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
518336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
519336695Sdavidcs#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
520336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
521336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
522336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
523336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
524336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
525336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
526336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
527336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
528336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
529336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
530336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
531336695Sdavidcs#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14
532336695Sdavidcs
533336695Sdavidcs	enum ecore_roce_qp_state	new_state;
534336695Sdavidcs	u16		pkey;
535336695Sdavidcs	bool		incoming_rdma_read_en;
536336695Sdavidcs	bool		incoming_rdma_write_en;
537336695Sdavidcs	bool		incoming_atomic_en;
538336695Sdavidcs	bool		e2e_flow_control_en;
539336695Sdavidcs	u32		dest_qp;
540336695Sdavidcs	u16		mtu;
541336695Sdavidcs	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
542336695Sdavidcs	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
543336695Sdavidcs	u32		flow_label; /* ignored in IPv4 */
544336695Sdavidcs	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
545336695Sdavidcs	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
546336695Sdavidcs	u16		udp_src_port; /* RoCEv2 only */
547336695Sdavidcs
548336695Sdavidcs	u16		vlan_id;
549336695Sdavidcs
550336695Sdavidcs	u32		rq_psn;
551336695Sdavidcs	u32		sq_psn;
552336695Sdavidcs	u8		max_rd_atomic_resp;
553336695Sdavidcs	u8		max_rd_atomic_req;
554336695Sdavidcs	u32		ack_timeout;
555336695Sdavidcs	u8		retry_cnt;
556336695Sdavidcs	u8		rnr_retry_cnt;
557336695Sdavidcs	u8		min_rnr_nak_timer;
558336695Sdavidcs	bool		sqd_async;
559336695Sdavidcs	u8		remote_mac_addr[6];
560336695Sdavidcs	u8		local_mac_addr[6];
561336695Sdavidcs	bool		use_local_mac;
562336695Sdavidcs	enum roce_mode	roce_mode;
563336695Sdavidcs};
564336695Sdavidcs
565336695Sdavidcsstruct ecore_rdma_query_qp_out_params {
566336695Sdavidcs	/* output variables (given to miniport) */
567336695Sdavidcs	enum ecore_roce_qp_state	state;
568336695Sdavidcs	u32		rq_psn; /* responder */
569336695Sdavidcs	u32		sq_psn; /* requester */
570336695Sdavidcs	bool		draining; /* send queue is draining */
571336695Sdavidcs	u16		mtu;
572336695Sdavidcs	u32		dest_qp;
573336695Sdavidcs	bool		incoming_rdma_read_en;
574336695Sdavidcs	bool		incoming_rdma_write_en;
575336695Sdavidcs	bool		incoming_atomic_en;
576336695Sdavidcs	bool		e2e_flow_control_en;
577336695Sdavidcs	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
578336695Sdavidcs	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
579336695Sdavidcs	u32		flow_label; /* ignored in IPv4 */
580336695Sdavidcs	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
581336695Sdavidcs	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
582336695Sdavidcs	u32		timeout;
583336695Sdavidcs	u8		rnr_retry;
584336695Sdavidcs	u8		retry_cnt;
585336695Sdavidcs	u8		min_rnr_nak_timer;
586336695Sdavidcs	u16		pkey_index;
587336695Sdavidcs	u8		max_rd_atomic;
588336695Sdavidcs	u8		max_dest_rd_atomic;
589336695Sdavidcs	bool		sqd_async;
590336695Sdavidcs};
591336695Sdavidcs
592336695Sdavidcsstruct ecore_rdma_destroy_qp_out_params {
593336695Sdavidcs	u32		sq_cq_prod;
594336695Sdavidcs	u32		rq_cq_prod;
595336695Sdavidcs};
596336695Sdavidcs
597336695Sdavidcsstruct ecore_rdma_create_srq_out_params {
598336695Sdavidcs	u16 srq_id;
599336695Sdavidcs};
600336695Sdavidcs
601336695Sdavidcsstruct ecore_rdma_destroy_srq_in_params {
602336695Sdavidcs	u16 srq_id;
603336695Sdavidcs	bool is_xrc;
604336695Sdavidcs};
605336695Sdavidcs
606336695Sdavidcsstruct ecore_rdma_modify_srq_in_params {
607336695Sdavidcs	u32 wqe_limit;
608336695Sdavidcs	u16 srq_id;
609336695Sdavidcs	bool is_xrc;
610336695Sdavidcs};
611336695Sdavidcs#endif
612336695Sdavidcs
613336695Sdavidcsstruct ecore_rdma_resize_cq_out_params {
614336695Sdavidcs	/* output variables, provided to the upper layer */
615336695Sdavidcs	u32 prod; /* CQ producer value on old PBL */
616336695Sdavidcs	u32 cons; /* CQ consumer value on old PBL */
617336695Sdavidcs};
618336695Sdavidcs
619336695Sdavidcsstruct ecore_rdma_resize_cnq_in_params {
620336695Sdavidcs	/* input variables (given by miniport) */
621336695Sdavidcs	u32	cnq_id;
622336695Sdavidcs	u32	pbl_page_size_log; /* for the pages that contain the
623336695Sdavidcs			* pointers to the cnq pages
624336695Sdavidcs			*/
625336695Sdavidcs	u64	pbl_ptr;
626336695Sdavidcs};
627336695Sdavidcs
628336695Sdavidcs#ifndef __EXTRACT__LINUX__
629336695Sdavidcsstruct ecore_rdma_stats_out_params {
630336695Sdavidcs	u64	sent_bytes;
631336695Sdavidcs	u64	sent_pkts;
632336695Sdavidcs	u64	rcv_bytes;
633336695Sdavidcs	u64	rcv_pkts;
634336695Sdavidcs
635336695Sdavidcs	/* RoCE only */
636336695Sdavidcs	u64	icrc_errors;		/* wraps at 32 bits */
637336695Sdavidcs	u64	retransmit_events;	/* wraps at 32 bits */
638336695Sdavidcs	u64	silent_drops;		/* wraps at 16 bits */
639336695Sdavidcs	u64	rnr_nacks_sent;		/* wraps at 16 bits */
640336695Sdavidcs
641336695Sdavidcs	/* RoCE DCQCN */
642336695Sdavidcs	u64	ecn_pkt_rcv;
643336695Sdavidcs	u64	cnp_pkt_rcv;
644336695Sdavidcs	u64	cnp_pkt_sent;
645336695Sdavidcs
646336695Sdavidcs	/* iWARP only */
647336695Sdavidcs	u64	iwarp_tx_fast_rxmit_cnt;
648336695Sdavidcs	u64	iwarp_tx_slow_start_cnt;
649336695Sdavidcs	u64	unalign_rx_comp;
650336695Sdavidcs};
651336695Sdavidcs
652336695Sdavidcsstruct ecore_rdma_counters_out_params {
653336695Sdavidcs	u64	pd_count;
654336695Sdavidcs	u64	max_pd;
655336695Sdavidcs	u64	dpi_count;
656336695Sdavidcs	u64	max_dpi;
657336695Sdavidcs	u64	cq_count;
658336695Sdavidcs	u64	max_cq;
659336695Sdavidcs	u64	qp_count;
660336695Sdavidcs	u64	max_qp;
661336695Sdavidcs	u64	tid_count;
662336695Sdavidcs	u64	max_tid;
663336695Sdavidcs	u64	srq_count;
664336695Sdavidcs	u64	max_srq;
665336695Sdavidcs	u64	xrc_srq_count;
666336695Sdavidcs	u64	max_xrc_srq;
667336695Sdavidcs	u64	xrcd_count;
668336695Sdavidcs	u64	max_xrcd;
669336695Sdavidcs};
670336695Sdavidcs#endif
671336695Sdavidcs
672336695Sdavidcsenum _ecore_status_t
673336695Sdavidcsecore_rdma_add_user(void *rdma_cxt,
674336695Sdavidcs		    struct ecore_rdma_add_user_out_params *out_params);
675336695Sdavidcs
676336695Sdavidcsenum _ecore_status_t
677336695Sdavidcsecore_rdma_alloc_pd(void *rdma_cxt,
678336695Sdavidcs		    u16	*pd);
679336695Sdavidcs
680336695Sdavidcsenum _ecore_status_t
681336695Sdavidcsecore_rdma_alloc_tid(void *rdma_cxt,
682336695Sdavidcs		     u32 *tid);
683336695Sdavidcs
684336695Sdavidcsenum _ecore_status_t
685336695Sdavidcsecore_rdma_create_cq(void *rdma_cxt,
686336695Sdavidcs		     struct ecore_rdma_create_cq_in_params *params,
687336695Sdavidcs		     u16 *icid);
688336695Sdavidcs
/* Creates a QP. Returns a pointer to the responder's CID, which is also a
 * pointer to the ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);

/* Creates a RoCE UD QP; its parameters are returned in *out_params.
 * Torn down with ecore_roce_destroy_ud_qp().
 */
enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

/* Deregisters a TID previously set up with ecore_rdma_register_tid(). */
enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

/* Destroys a CQ created by ecore_rdma_create_cq(). */
enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

/* Destroys a QP created by ecore_rdma_create_qp(). */
enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp,
		      struct ecore_rdma_destroy_qp_out_params *out_params);

/* Destroys the UD QP identified by cid (see ecore_roce_create_ud_qp()). */
enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);
717336695Sdavidcs
/* Frees a PD allocated by ecore_rdma_alloc_pd(). */
void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

/* Allocates an XRC domain; its id is returned through *xrcd_id. */
enum _ecore_status_t
ecore_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id);

/* Frees an XRC domain id allocated by ecore_rdma_alloc_xrcd(). */
void
ecore_rdma_free_xrcd(void  *rdma_cxt, u16 xrcd_id);

/* Frees a TID allocated by ecore_rdma_alloc_tid(). */
void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

/* Modifies an existing QP according to *params. */
enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

/* Returns the device attributes.
 * NOTE(review): presumably a pointer into the RDMA context rather than a
 * caller-owned copy - confirm ownership before freeing/caching.
 */
struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

/* Returns the port attributes (same ownership caveat as
 * ecore_rdma_query_device()).
 */
struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

/* Queries the current attributes/state of a QP into *out_params. */
enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

/* Registers a TID (obtained via ecore_rdma_alloc_tid()) with the
 * parameters given in *params. Undone by ecore_rdma_deregister_tid().
 */
enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

/* Removes the RDMA user that owns the given DPI
 * (counterpart of ecore_rdma_add_user()).
 */
void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

/* Resizes a CNQ according to *in_params. */
enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);
758336695Sdavidcs
/* Resizes a CQ. Returns the CQ CID, or zero in case of failure
 * (delivered through *out_params; the function itself returns a status).
 */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Starts the RDMA engine. Before calling rdma_start the upper layer
 * (VBD/qed) should fill the page-size and mtu in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);

/* Stops the RDMA engine; counterpart of ecore_rdma_start(). */
enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

/* Retrieves the statistics of the given stats queue into *out_parms. */
enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_parms);

/* Retrieves the RDMA object counters (pd/cq/qp/tid/srq/xrcd counts and
 * maxima, see struct ecore_rdma_counters_out_params) into *out_parms.
 */
enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_parms);

/* Returns the status-block (SB) id corresponding to rel_sb_id. */
u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id);

#ifndef LINUX_REMOVE
/* Returns the CAU timer resolution. */
u32 ecore_rdma_query_cau_timer_res(void);
#endif

/* Updates the producer index of the CNQ selected by cnq_index. */
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

/* Frees the RDMA resources associated with the hwfn. */
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

/* Creates an SRQ (shared receive queue) described by *in_params; results
 * are returned in *out_params. Destroyed with ecore_rdma_destroy_srq().
 */
enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
		      struct ecore_rdma_create_srq_in_params *in_params,
		      struct ecore_rdma_create_srq_out_params *out_params);

/* Destroys an SRQ created by ecore_rdma_create_srq(). */
enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
		       struct ecore_rdma_destroy_srq_in_params *in_params);

/* Modifies an existing SRQ according to *in_params. */
enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
		      struct ecore_rdma_modify_srq_in_params *in_params);
806336695Sdavidcs#ifdef CONFIG_ECORE_IWARP
807336695Sdavidcs
808336695Sdavidcs/* iWARP API */
809336695Sdavidcs
810336695Sdavidcs#ifndef __EXTRACT__LINUX__
811336695Sdavidcs
/* Asynchronous iWARP connection events delivered to the upper layer via the
 * iwarp_event_handler callback (see struct ecore_iwarp_cm_event_params).
 * Values from ECORE_IWARP_ERROR_EVENTS_START onward are slow/error-path
 * events.
 */
enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ack on MPA response)
					     */
	ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP, /* Passive side will drop
					      * MPA requests
					      */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	/* Slow/Error path events start from here */
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_ERROR_EVENTS_START = ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};
836336695Sdavidcs
/* IP version of an (offloaded) TCP connection; selects how the u32[4]
 * address fields in ecore_iwarp_cm_info / ecore_iwarp_listen_in are read.
 */
enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};
842336695Sdavidcs
/* Connection-manager identification of an iWARP connection: the TCP/IP
 * 4-tuple plus VLAN, and the MPA negotiation parameters. The ip arrays
 * hold 4 dwords so both IPv4 and IPv6 addresses fit; ip_version selects
 * the interpretation.
 */
struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data; /* MPA private data */
	u16 private_data_len;
	u8 ord; /* MPA ORD value */
	u8 ird; /* MPA IRD value */
};
855336695Sdavidcs
/* Event data handed to the iwarp_event_handler callback for each
 * ecore_iwarp_event_type event.
 */
struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status; /* event status code */
};
862336695Sdavidcs
/* Callback for delivering iWARP connection events to the upper layer
 * driver; context is the cb_context registered by that driver.
 */
typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);
865336695Sdavidcs
/* Active Side Connect Flow:
 * upper layer driver calls ecore_iwarp_connect.
 * Function is blocking: i.e. returns after the TCP connection is established.
 * After the MPA connection is established the ECORE_IWARP_EVENT_ACTIVE_COMPLETE
 * event will be passed to the upper layer driver using the event_cb passed in
 * ecore_iwarp_connect_in. Information on the established connection will be
 * initialized in the event data.
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info; /* connection 4-tuple + MPA params */
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	/* Endpoint handle; presumably passed back in later calls such as
	 * ecore_iwarp_send_rtr() - TODO confirm against callers.
	 */
	void *ep_context;
};
887336695Sdavidcs
/* Passive side connect flow:
 * upper layer driver calls ecore_iwarp_create_listen.
 * Once a SYN packet that matches an ip/port that is listened on arrives, ecore
 * will offload the tcp connection. After an MPA Request is received on the
 * offloaded connection, the event ECORE_IWARP_EVENT_MPA_REQUEST will be sent
 * to the upper layer driver using the event_cb passed below. The event data
 * will be placed in the event parameter. After the upper layer driver processes
 * the event, ecore_iwarp_accept or ecore_iwarp_reject should be called to
 * continue MPA negotiation. Once negotiation is complete the event
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE will be passed to the event_cb passed
 * originally in the ecore_iwarp_listen_in structure.
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4]; /* local address to listen on (IPv4 uses one dword) */
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};
913336695Sdavidcs
/* Input for ecore_iwarp_accept(): continues MPA negotiation after an
 * ECORE_IWARP_EVENT_MPA_REQUEST event.
 */
struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data; /* MPA private data to send in the response */
	u16 private_data_len;
	u8 ord; /* MPA ORD value */
	u8 ird; /* MPA IRD value */
};

/* Input for ecore_iwarp_reject(): rejects a pending MPA request. */
struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data; /* MPA private data to send in the reject */
	u16 private_data_len;
};

/* Input for ecore_iwarp_send_rtr(): identifies the target endpoint. */
struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

/* Identifies the endpoint whose TCP connection should be aborted. */
struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};
938336695Sdavidcs
939336695Sdavidcs#endif
940336695Sdavidcs
/* Active side: establishes the TCP connection and starts MPA negotiation.
 * Blocking until the TCP connection is established (see the flow comment
 * above struct ecore_iwarp_connect_in).
 */
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

/* Passive side: starts listening on the ip/port given in *iparams. The
 * handle returned in *oparams is later passed to
 * ecore_iwarp_destroy_listen() / ecore_iwarp_pause_listen().
 */
enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

/* Accepts a pending MPA request (ECORE_IWARP_EVENT_MPA_REQUEST). */
enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

/* Rejects a pending MPA request. */
enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

/* Destroys a listener created by ecore_iwarp_create_listen(). */
enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

/* Sends an RTR on the endpoint given in *iparams. */
enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

/* Pauses (pause == true) or resumes a listener; a paused listener drops
 * MPA requests (ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP).
 * NOTE(review): 'comp' presumably requests the completion event - confirm.
 */
enum _ecore_status_t
ecore_iwarp_pause_listen(void *rdma_cxt, void *handle, bool pause, bool comp);
967336695Sdavidcs
968336695Sdavidcs#endif /* CONFIG_ECORE_IWARP */
969336695Sdavidcs
970336695Sdavidcs#endif
971