/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_roce_api.h 337517 2018-08-09 01:17:35Z davidcs $
 *
 */

#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif

enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};
/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size limitation:
 * The CNQ size should be set to twice the number of CQs, since for each CQ
 * one element may be inserted into the CNQ, and another element is used per
 * CQ to accommodate a possible race in the arm mechanism.
 * The FW supports a CNQ of at most 64k-1 elements, which can be an issue:
 * note that the number of QPs can reach 32k, giving 64k CQs and hence 128k
 * CNQ elements. Fortunately the FW can buffer CNQ elements, avoiding an
 * overflow at the expense of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */

/* rdma interface */
enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR,
	ECORE_RDMA_TID_FMR,
	ECORE_RDMA_TID_MW_TYPE1,
	ECORE_RDMA_TID_MW_TYPE2A
};

enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

typedef
void (*affiliated_event_t)(void	*context,
			   u8	fw_event_code,
			   void	*fw_handle);

typedef
void (*unaffiliated_event_t)(void *context,
			     u8   event_code);

struct ecore_rdma_events {
	void			*context;
	affiliated_event_t	affiliated_event;
	unaffiliated_event_t	unaffiliated_event;
};
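
/* Illustrative sketch (not part of the API): wiring up the event callbacks
 * above. The handler names and context cookie are hypothetical.
 */
#if 0
static void my_affiliated_event(void *context, u8 fw_event_code,
				void *fw_handle)
{
	/* handle per-resource (e.g. QP/CQ) firmware events here */
}

static void my_unaffiliated_event(void *context, u8 event_code)
{
	/* handle device-wide events here */
}

static struct ecore_rdma_events my_events = {
	.context		= NULL, /* caller-private cookie */
	.affiliated_event	= my_affiliated_event,
	.unaffiliated_event	= my_unaffiliated_event,
};
#endif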

struct ecore_rdma_device {
	/* Vendor specific information */
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;

	u64	node_guid; /* node GUID */
	u64	sys_image_guid; /* System image GUID */

	u8	max_cnq;
	u8	max_sge; /* The maximum number of scatter/gather entries
			  * per Work Request supported
			  */
	u8	max_srq_sge; /* The maximum number of scatter/gather entries
			      * per Work Request supported for SRQ
			      */
	u16	max_inline;
	u32	max_wqe; /* The maximum number of outstanding work
			  * requests on any Work Queue supported
			  */
	u32	max_srq_wqe; /* The maximum number of outstanding work
			      * requests on any Work Queue supported for SRQ
			      */
	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
					     * & atomic operations that can be
					     * outstanding per QP
					     */

	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
					    * initiation of RDMA Read
					    * & atomic operations
					    */
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_srq; /* Maximum number of SRQs */
	u32	max_mr; /* Maximum number of MRs supported by this device */
	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
			      * block that can be registered by this device
			      */
	u32	max_cqe;
	u32	max_mw; /* The maximum number of memory windows supported */
	u32	max_fmr;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd; /* The maximum number of protection domains supported */
	u32	max_ah;
	u8	max_pkey;
	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
	u8	max_stats_queues; /* Maximum number of statistics queues */
	u32	dev_caps;

	/* Ability to support RNR-NAK generation */
#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
	/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
	/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
	/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
	/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
	/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
	/* Ability to support CQ resizing */
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
	/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
	/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
	/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
	/* Ability to support block list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
	/* Ability to support zero based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
	/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support Loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey; /* Value of reserved L_key */
	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
	struct ecore_rdma_events events;
};
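
/* Illustrative sketch: dev_caps is decoded with the MASK/SHIFT pairs
 * defined above, e.g. to check whether atomic operations are supported.
 */
#if 0
static bool ecore_dev_supports_atomics(struct ecore_rdma_device *dev)
{
	return (dev->dev_caps >> ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
	       ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK;
}
#endif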

enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

struct ecore_rdma_port {
	enum ecore_port_state port_state;
	int	link_speed;
	u64	max_msg_size;
	u8	source_gid_table_len;
	void	*source_gid_table_ptr;
	u8	pkey_table_len;
	void	*pkey_table_ptr;
	u32	pkey_bad_counter;
	enum ecore_roce_capability capability;
};

struct ecore_rdma_cnq_params
{
	u8  num_pbl_pages; /* Number of pages in the PBL allocated
			    * for this queue
			    */
	u64 pbl_ptr; /* Address to the first entry of the queue PBL */
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};
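
/* Illustrative sketch: following the rule above, a 64-bit host would use
 * the 32-bit CQ doorbell mode and a 32-bit host the 16-bit mode.
 */
#if 0
enum ecore_rdma_cq_mode cq_mode = (sizeof(void *) == 8) ?
	ECORE_RDMA_CQ_MODE_32_BITS : ECORE_RDMA_CQ_MODE_16_BITS;
#endif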

struct ecore_roce_dcqcn_params {
	u8	notification_point;
	u8	reaction_point;

	/* fields for notification point */
	u32	cnp_send_timeout;

	/* fields for reaction point */
	u32	rl_bc_rate;  /* Byte Counter Limit. */
	u16	rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
	u16	rl_r_ai;     /* Active increase rate */
	u16	rl_r_hai;    /* Hyper active increase rate */
	u16	dcqcn_g;     /* Alpha update gain in 1/64K resolution */
	u32	dcqcn_k_us;  /* Alpha update interval */
	u32	dcqcn_timeout_us;
};

#ifdef CONFIG_ECORE_IWARP

#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

struct ecore_iwarp_params {
	u32				rcv_wnd_size;
	u16				ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif

struct ecore_roce_params {
	enum ecore_rdma_cq_mode		cq_mode;
	struct ecore_roce_dcqcn_params	dcqcn_params;
	u8				ll2_handle; /* required for UD QPs */
};

struct ecore_rdma_start_in_params {
	struct ecore_rdma_events	*events;
	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
	u8				desired_cnq;
	u16				max_mtu;
	u8				mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_params	iwarp;
#endif
	struct ecore_roce_params	roce;
};

struct ecore_rdma_add_user_out_params {
	/* output variables (given to miniport) */
	u16	dpi;
	u64	dpi_addr;
	u64	dpi_phys_addr;
	u32	dpi_size;
	u16	wid_count;
};

/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
	/* input variables (given by miniport) */
	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
	u32	cq_handle_hi;
	u32	cq_size;
	u16	dpi;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
	u8	cnq_id;
	u16	int_timeout;
};

struct ecore_rdma_resize_cq_in_params {
	/* input variables (given by miniport) */
	u16	icid;
	u32	cq_size;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
};

enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

struct ecore_rdma_create_qp_in_params {
	/* input variables (given by miniport) */
	u32	qp_handle_lo; /* QP handle to be written in CQE */
	u32	qp_handle_hi;
	u32	qp_handle_async_lo; /* QP handle to be written in async event */
	u32	qp_handle_async_hi;
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;
	u16	pd;
	u16	dpi;
	u16	sq_cq_id;
	u16	sq_num_pages;
	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
	u8	max_sq_sges;
	u16	rq_cq_id;
	u16	rq_num_pages;
	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
	u16	srq_id;
	u8	stats_queue;
};

struct ecore_rdma_create_qp_out_params {
	/* output variables (given to miniport) */
	u32		qp_id;
	u16		icid;
	void		*rq_pbl_virt;
	dma_addr_t	rq_pbl_phys;
	void		*sq_pbl_virt;
	dma_addr_t	sq_pbl_phys;
};

struct ecore_rdma_destroy_cq_in_params {
	/* input variables (given by miniport) */
	u16 icid;
};

struct ecore_rdma_destroy_cq_out_params {
	/* output variables, provided to the upper layer */

	/* Sequence number of completion notification sent for the CQ on
	 * the associated CNQ
	 */
	u16	num_cq_notif;
};

/* ECORE GID can be used as IPv4/6 address in RoCE v2 */
union ecore_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

struct ecore_rdma_modify_qp_in_params {
	/* input variables (given by miniport) */
	u32		modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum ecore_roce_qp_state	new_state;
	u16		pkey;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	u32		dest_qp;
	u16		mtu;
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u32		flow_label; /* ignored in IPv4 */
	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
	u16		udp_src_port; /* RoCEv2 only */

	u16		vlan_id;

	u32		rq_psn;
	u32		sq_psn;
	u8		max_rd_atomic_resp;
	u8		max_rd_atomic_req;
	u32		ack_timeout;
	u8		retry_cnt;
	u8		rnr_retry_cnt;
	u8		min_rnr_nak_timer;
	bool		sqd_async;
	u8		remote_mac_addr[6];
	u8		local_mac_addr[6];
	bool		use_local_mac;
	enum roce_mode	roce_mode;
};
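
/* Illustrative sketch: modify_flags marks which of the fields below are
 * valid, using the MASK/SHIFT pairs above. E.g. moving a QP to RTS with a
 * new SQ PSN (values are hypothetical).
 */
#if 0
struct ecore_rdma_modify_qp_in_params params = {0};

params.new_state = ECORE_ROCE_QP_STATE_RTS;
params.sq_psn = 0x1000;
params.modify_flags =
	(ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
	 ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
	(ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK <<
	 ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT);
#endif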

struct ecore_rdma_query_qp_out_params {
	/* output variables (given to miniport) */
	enum ecore_roce_qp_state	state;
	u32		rq_psn; /* responder */
	u32		sq_psn; /* requester */
	bool		draining; /* send queue is draining */
	u16		mtu;
	u32		dest_qp;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	u32		flow_label; /* ignored in IPv4 */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u32		timeout;
	u8		rnr_retry;
	u8		retry_cnt;
	u8		min_rnr_nak_timer;
	u16		pkey_index;
	u8		max_rd_atomic;
	u8		max_dest_rd_atomic;
	bool		sqd_async;
};

struct ecore_rdma_register_tid_in_params {
	/* input variables (given by miniport) */
	u32	itid; /* index only, 18 bit long, lkey = itid << 8 | key */
	enum ecore_rdma_tid_type tid_type;
	u8	key;
	u16	pd;
	bool	local_read;
	bool	local_write;
	bool	remote_read;
	bool	remote_write;
	bool	remote_atomic;
	bool	mw_bind;
	u64	pbl_ptr;
	bool	pbl_two_level;
	u8	pbl_page_size_log; /* for the pages that contain the pointers
				    * to the MR pages
				    */
	u8	page_size_log; /* for the MR pages */
	u32	fbo;
	u64	length; /* only lower 40 bits are valid */
	u64	vaddr;
	bool	zbva;
	bool	phy_mr;
	bool	dma_mr;

	/* DIF related fields */
	bool	dif_enabled;
	u64	dif_error_addr;
	u64	dif_runt_addr;
};
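
/* Illustrative sketch: per the itid comment above, the L_key handed back
 * to the application is composed from the 18-bit itid and the 8-bit key.
 */
#if 0
u32 lkey = (params.itid << 8) | params.key;
#endif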

struct ecore_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;
};

struct ecore_rdma_create_srq_out_params {
	u16 srq_id;
};

struct ecore_rdma_destroy_srq_in_params {
	u16 srq_id;
};

struct ecore_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
};

struct ecore_rdma_resize_cq_out_params {
	/* output variables, provided to the upper layer */
	u32 prod; /* CQ producer value on old PBL */
	u32 cons; /* CQ consumer value on old PBL */
};

struct ecore_rdma_resize_cnq_in_params {
	/* input variables (given by miniport) */
	u32	cnq_id;
	u32	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the cnq pages
				    */
	u64	pbl_ptr;
};

struct ecore_rdma_stats_out_params {
	u64	sent_bytes;
	u64	sent_pkts;
	u64	rcv_bytes;
	u64	rcv_pkts;

	/* RoCE only */
	u64	icrc_errors;		/* wraps at 32 bits */
	u64	retransmit_events;	/* wraps at 32 bits */
	u64	silent_drops;		/* wraps at 16 bits */
	u64	rnr_nacks_sent;		/* wraps at 16 bits */

	/* iWARP only */
	u64	iwarp_tx_fast_rxmit_cnt;
	u64	iwarp_tx_slow_start_cnt;
	u64	unalign_rx_comp;
};

struct ecore_rdma_counters_out_params {
	u64	pd_count;
	u64	max_pd;
	u64	dpi_count;
	u64	max_dpi;
	u64	cq_count;
	u64	max_cq;
	u64	qp_count;
	u64	max_qp;
	u64	tid_count;
	u64	max_tid;
};

enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
		    struct ecore_rdma_add_user_out_params *out_params);

enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
		    u16	*pd);

enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
		     u32 *tid);

enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
		     struct ecore_rdma_create_cq_in_params *params,
		     u16 *icid);

/* Returns a pointer to the responder's CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);
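
/* Illustrative sketch (hypothetical values, assuming the usual
 * ECORE_SUCCESS status value): create a CQ, then a QP whose SQ and RQ
 * both report completions to it.
 */
#if 0
struct ecore_rdma_create_cq_in_params cq_params = {0};
struct ecore_rdma_create_qp_in_params qp_in = {0};
struct ecore_rdma_create_qp_out_params qp_out;
struct ecore_rdma_qp *qp;
u16 icid;

cq_params.cq_size = 1024;
cq_params.dpi = user_out.dpi; /* from ecore_rdma_add_user() */
/* pbl_ptr/pbl_num_pages must describe the CQ's physical buffer list */
if (ecore_rdma_create_cq(rdma_cxt, &cq_params, &icid) != ECORE_SUCCESS)
	return;

qp_in.sq_cq_id = icid;
qp_in.rq_cq_id = icid;
qp = ecore_rdma_create_qp(rdma_cxt, &qp_in, &qp_out);
if (qp == NULL)
	return;
#endif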

enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

/* Returns the old CQ producer and consumer values in out_params */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling rdma_start, the upper layer (VBD/qed) should fill in the
 * page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);
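
/* Illustrative bring-up sketch (local names are hypothetical, assuming the
 * usual ECORE_SUCCESS status value): start the RDMA engine, then register
 * a user to obtain a DPI for doorbells.
 */
#if 0
struct ecore_rdma_start_in_params start_params = {0};
struct ecore_rdma_add_user_out_params user_out;

start_params.events = &my_events;
start_params.desired_cnq = 1;
start_params.max_mtu = 4096;
/* cnq_pbl_list[0] and mac_addr must be filled by the caller as well */

if (ecore_rdma_start(p_hwfn, &start_params) != ECORE_SUCCESS)
	return;
if (ecore_rdma_add_user(p_hwfn, &user_out) != ECORE_SUCCESS)
	return;
#endif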

enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_parms);
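
/* Illustrative sketch: reading the per-queue statistics declared above
 * (stats queue 0 is a hypothetical choice).
 */
#if 0
struct ecore_rdma_stats_out_params stats;

if (ecore_rdma_query_stats(rdma_cxt, 0, &stats) == ECORE_SUCCESS) {
	/* stats.sent_bytes, stats.rcv_pkts, ... are now valid */
}
#endif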

enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_parms);

u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);

u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);

void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

#ifdef CONFIG_ECORE_IWARP

/* iWARP API */

enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ACK on MPA response)
					     */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};

enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};

struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);

/* Active Side Connect Flow:
 * The upper layer driver calls ecore_iwarp_connect. The function is
 * blocking, i.e. it returns after the TCP connection is established.
 * After the MPA connection is established, the
 * ECORE_IWARP_EVENT_ACTIVE_COMPLETE event will be passed to the upper
 * layer driver using the event_cb passed in ecore_iwarp_connect_in.
 * Information on the established connection will be initialized in the
 * event data.
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	void *ep_context;
};
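
/* Illustrative sketch of the active-side call described above (the event
 * handler, context and values are hypothetical).
 */
#if 0
struct ecore_iwarp_connect_in iparams = {0};
struct ecore_iwarp_connect_out oparams;

iparams.event_cb = my_iwarp_event_handler;
iparams.cb_context = my_ctx;
iparams.qp = qp;
iparams.cm_info.ip_version = ECORE_TCP_IPV4;
iparams.cm_info.remote_port = 6789; /* hypothetical TCP port */
/* cm_info addresses, mss and the MAC addresses must be filled as well */

if (ecore_iwarp_connect(rdma_cxt, &iparams, &oparams) == ECORE_SUCCESS) {
	/* ECORE_IWARP_EVENT_ACTIVE_COMPLETE will arrive via event_cb */
}
#endif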

/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen. Once a SYN
 * packet arrives that matches an IP/port being listened on, ecore will
 * offload the TCP connection. After an MPA Request is received on the
 * offloaded connection, the ECORE_IWARP_EVENT_MPA_REQUEST event will be
 * sent to the upper layer driver using the event_cb passed below, with
 * the event data placed in the event parameter. After the upper layer
 * driver processes the event, ecore_iwarp_accept or ecore_iwarp_reject
 * should be called to continue MPA negotiation. Once negotiation is
 * complete, the ECORE_IWARP_EVENT_PASSIVE_COMPLETE event will be passed
 * to the event_cb passed originally in the ecore_iwarp_listen_in
 * structure.
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};
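
/* Illustrative sketch of the passive-side flow described above: create a
 * listener; from the ECORE_IWARP_EVENT_MPA_REQUEST handler, call
 * ecore_iwarp_accept() or ecore_iwarp_reject() (names are hypothetical).
 */
#if 0
struct ecore_iwarp_listen_in lparams = {0};
struct ecore_iwarp_listen_out lout;

lparams.event_cb = my_iwarp_event_handler;
lparams.cb_context = my_ctx;
lparams.max_backlog = 8;
lparams.ip_version = ECORE_TCP_IPV4;
lparams.port = 6789; /* hypothetical TCP port */

if (ecore_iwarp_create_listen(rdma_cxt, &lparams, &lout) != ECORE_SUCCESS)
	return;
/* later: ecore_iwarp_destroy_listen(rdma_cxt, lout.handle); */
#endif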

struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data;
	u16 private_data_len;
};

struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};

enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

enum _ecore_status_t
ecore_iwarp_tcp_abort(void *rdma_cxt, struct ecore_iwarp_tcp_abort_in *iparams);

#endif /* CONFIG_ECORE_IWARP */

#endif