/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_rdma_api.h 337517 2018-08-09 01:17:35Z davidcs $
 */

#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef LINUX_REMOVE
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
#endif

#ifndef __EXTRACT__LINUX__

enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};

/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size limitation:
 * The CNQ size should be set to twice the number of CQs, since for each CQ one
 * element may be inserted into the CNQ and another element is used per CQ to
 * accommodate a possible race in the arm mechanism.
 * The FW supports a CNQ of at most 64k-1 entries, which can be a limitation:
 * the number of QPs can reach 32k, giving 64k CQs and hence 128k CNQ elements.
 * The FW can buffer CNQ elements to avoid an overflow, at the expense of
 * performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */
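
/* Illustrative sketch (not part of the API): sizing a CNQ from the number of
 * CQs per the limitation described above. 'num_cqs' is a hypothetical
 * caller-side value.
 *
 *	u32 cnq_size = 2 * num_cqs;
 *
 *	if (cnq_size > ECORE_RDMA_MAX_CNQ_SIZE)
 *		cnq_size = ECORE_RDMA_MAX_CNQ_SIZE;
 */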

/* rdma interface */

enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

enum ecore_rdma_qp_type {
	ECORE_RDMA_QP_TYPE_RC,
	ECORE_RDMA_QP_TYPE_XRC_INI,
	ECORE_RDMA_QP_TYPE_XRC_TGT,
	ECORE_RDMA_QP_TYPE_INVAL = 0xffff,
};

enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR,
	ECORE_RDMA_TID_FMR,
	ECORE_RDMA_TID_MW_TYPE1,
	ECORE_RDMA_TID_MW_TYPE2A
};

typedef
void (*affiliated_event_t)(void	*context,
			   u8	fw_event_code,
			   void	*fw_handle);

typedef
void (*unaffiliated_event_t)(void *context,
			     u8   event_code);

struct ecore_rdma_events {
	void			*context;
	affiliated_event_t	affiliated_event;
	unaffiliated_event_t	unaffiliated_event;
};

struct ecore_rdma_device {
	/* Vendor specific information */
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;

	u64	node_guid; /* node GUID */
	u64	sys_image_guid; /* System image GUID */

	u8	max_cnq;
	u8	max_sge; /* The maximum number of scatter/gather entries
			  * per Work Request supported
			  */
	u8	max_srq_sge; /* The maximum number of scatter/gather entries
			      * per Work Request supported for SRQ
			      */
	u16	max_inline;
	u32	max_wqe; /* The maximum number of outstanding work
			  * requests on any Work Queue supported
			  */
	u32	max_srq_wqe; /* The maximum number of outstanding work
			      * requests on any Work Queue supported for SRQ
			      */
	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
					     * & atomic operations that can be
					     * outstanding per QP
					     */

	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
					    * initiation of RDMA Read
					    * & atomic operations
					    */
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_srq; /* Maximum number of SRQs */
	u32	max_mr; /* Maximum number of MRs supported by this device */
	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
			      * block that can be registered by this device
			      */
	u32	max_cqe;
	u32	max_mw; /* The maximum number of memory windows supported */
	u32	max_fmr;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd; /* The maximum number of protection domains supported */
	u32	max_ah;
	u8	max_pkey;
	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
	u8	max_stats_queues; /* Maximum number of statistics queues */
	u32	dev_caps;

	/* Ability to support RNR-NAK generation */

#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
	/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
	/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
	/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
	/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
	/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
	/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
	/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
	/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
	/* Ability to support a block-list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
	/* Ability to support zero-based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
	/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support loopback on a QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey; /* Value of reserved L_key */
	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
	struct ecore_rdma_events events;
};
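
/* Illustrative sketch (not part of the API): testing a capability bit in
 * ecore_rdma_device::dev_caps with the MASK/SHIFT pairs defined above.
 * 'rdma_cxt' is the usual rdma context handle passed to this API.
 *
 *	struct ecore_rdma_device *dev = ecore_rdma_query_device(rdma_cxt);
 *	bool atomic_supported =
 *		(dev->dev_caps >> ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *		ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 */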

enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

struct ecore_rdma_port {
	enum ecore_port_state port_state;
	int	link_speed;
	u64	max_msg_size;
	u8	source_gid_table_len;
	void	*source_gid_table_ptr;
	u8	pkey_table_len;
	void	*pkey_table_ptr;
	u32	pkey_bad_counter;
	enum ecore_roce_capability capability;
};

struct ecore_rdma_cnq_params
{
	u8  num_pbl_pages; /* Number of pages in the PBL allocated
				   * for this queue
				   */
	u64 pbl_ptr; /* Address to the first entry of the queue PBL */
};

/* The CQ mode affects the CQ doorbell transaction size.
 * 64-bit machines should configure the 32-bit mode; 32-bit machines the
 * 16-bit mode.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};
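
/* Illustrative sketch (not part of the API): choosing the CQ mode from the
 * machine word size, per the comment above. 'start_params' is a hypothetical
 * struct ecore_rdma_start_in_params instance (defined later in this file).
 *
 *	start_params.roce.cq_mode = (sizeof(void *) == 8) ?
 *				    ECORE_RDMA_CQ_MODE_32_BITS :
 *				    ECORE_RDMA_CQ_MODE_16_BITS;
 */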

struct ecore_roce_dcqcn_params {
	u8	notification_point;
	u8	reaction_point;

	/* fields for notification point */
	u32	cnp_send_timeout;
	u8	cnp_dscp;
	u8	cnp_vlan_priority;

	/* fields for reaction point */
	u32	rl_bc_rate;  /* Byte Counter Limit. */
	u32	rl_max_rate; /* Maximum rate in Mbps resolution */
	u32	rl_r_ai;     /* Active increase rate */
	u32	rl_r_hai;    /* Hyper active increase rate */
	u32	dcqcn_gd;    /* Alpha denominator */
	u32	dcqcn_k_us;  /* Alpha update interval */
	u32	dcqcn_timeout_us;
};

struct ecore_rdma_glob_cfg {
	/* global tunables affecting all QPs created after they are
	 * set.
	 */
	u8 vlan_pri_en;
	u8 vlan_pri;
	u8 ecn_en;
	u8 ecn;
	u8 dscp_en;
	u8 dscp;
};

#ifndef LINUX_REMOVE
#define ECORE_RDMA_DCSP_BIT_MASK			0x01
#define ECORE_RDMA_DCSP_EN_BIT_MASK			0x02
#define ECORE_RDMA_ECN_BIT_MASK				0x04
#define ECORE_RDMA_ECN_EN_BIT_MASK			0x08
#define ECORE_RDMA_VLAN_PRIO_BIT_MASK		0x10
#define ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK	0x20

enum _ecore_status_t
ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *in_params,
			u32 glob_cfg_bits);

enum _ecore_status_t
ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *out_params);
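
/* Illustrative sketch (not part of the API): enabling DSCP marking globally,
 * assuming glob_cfg_bits selects which fields of the passed configuration are
 * applied. The DSCP value 26 is a placeholder.
 *
 *	struct ecore_rdma_glob_cfg cfg = {0};
 *
 *	cfg.dscp_en = 1;
 *	cfg.dscp = 26;
 *	ecore_rdma_set_glob_cfg(p_hwfn, &cfg,
 *				ECORE_RDMA_DCSP_EN_BIT_MASK |
 *				ECORE_RDMA_DCSP_BIT_MASK);
 */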
#endif /* LINUX_REMOVE */

#ifdef CONFIG_ECORE_IWARP

#define ECORE_IWARP_MAX_LIS_BACKLOG		(256)

#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

struct ecore_iwarp_params {
	u32				rcv_wnd_size;
	u16				ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif

struct ecore_roce_params {
	enum ecore_rdma_cq_mode		cq_mode;
	struct ecore_roce_dcqcn_params	dcqcn_params;
	u8				ll2_handle; /* required for UD QPs */
};

struct ecore_rdma_start_in_params {
	struct ecore_rdma_events	*events;
	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
	u8				desired_cnq;
	u16				max_mtu;
	u8				mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_params	iwarp;
#endif
	struct ecore_roce_params	roce;
};

struct ecore_rdma_add_user_out_params {
	/* output variables (given to miniport) */
	u16	dpi;
	u64	dpi_addr;
	u64	dpi_phys_addr;
	u32	dpi_size;
	u16	wid_count;
};

enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

/* ECORE GID can be used as IPv4/6 address in RoCE v2 */
union ecore_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

struct ecore_rdma_register_tid_in_params {
	/* input variables (given by miniport) */
	u32	itid; /* index only, 18 bit long, lkey = itid << 8 | key */
	enum ecore_rdma_tid_type tid_type;
	u8	key;
	u16	pd;
	bool	local_read;
	bool	local_write;
	bool	remote_read;
	bool	remote_write;
	bool	remote_atomic;
	bool	mw_bind;
	u64	pbl_ptr;
	bool	pbl_two_level;
	u8	pbl_page_size_log; /* for the pages that contain the pointers
		       * to the MR pages
		       */
	u8	page_size_log; /* for the MR pages */
	u32	fbo;
	u64	length; /* only lower 40 bits are valid */
	u64	vaddr;
	bool	zbva;
	bool	phy_mr;
	bool	dma_mr;

	/* DIF related fields */
	bool	dif_enabled;
	u64	dif_error_addr;
	u64	dif_runt_addr;
};
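
/* Illustrative sketch (not part of the API): per the itid comment above, the
 * lkey handed back to the verbs consumer combines the allocated itid with the
 * caller-chosen 8-bit key. 'tid_params' is a hypothetical, otherwise filled-in
 * ecore_rdma_register_tid_in_params instance.
 *
 *	u32 itid, lkey;
 *	u8 key = 0;
 *
 *	ecore_rdma_alloc_tid(rdma_cxt, &itid);
 *	tid_params.itid = itid;
 *	tid_params.key = key;
 *	ecore_rdma_register_tid(rdma_cxt, &tid_params);
 *	lkey = (itid << 8) | key;
 */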

/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
	/* input variables (given by miniport) */
	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
	u32	cq_handle_hi;
	u32	cq_size;
	u16	dpi;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
			   * pointers to the CQ pages
			   */
	u8	cnq_id;
	u16	int_timeout;
};
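
/* Illustrative sketch (not part of the API): minimal CQ creation. The DPI is
 * assumed to come from ecore_rdma_add_user(), and 'cq_pbl_phys' is a
 * hypothetical DMA address of a pre-built PBL.
 *
 *	struct ecore_rdma_create_cq_in_params cq_params = {0};
 *	u16 icid;
 *
 *	cq_params.cq_size = 1024;
 *	cq_params.dpi = user_out.dpi;
 *	cq_params.pbl_ptr = cq_pbl_phys;
 *	cq_params.pbl_num_pages = 1;
 *	cq_params.cnq_id = 0;
 *	ecore_rdma_create_cq(rdma_cxt, &cq_params, &icid);
 */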

struct ecore_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;

	/* XRC related only */
	bool is_xrc;
	u16 xrcd_id;
	u32 cq_cid;
	bool reserved_key_en;
};

struct ecore_rdma_destroy_cq_in_params {
	/* input variables (given by miniport) */
	u16 icid;
};

struct ecore_rdma_destroy_cq_out_params {
	/* output variables, provided to the upper layer */

	/* Sequence number of completion notification sent for the CQ on
	 * the associated CNQ
	 */
	u16	num_cq_notif;
};
#endif

struct ecore_rdma_resize_cq_in_params {
	/* input variables (given by miniport) */

	u16	icid;
	u32	cq_size;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
		       * pointers to the CQ pages
		       */
};

#ifndef __EXTRACT__LINUX__

struct ecore_rdma_create_qp_in_params {
	/* input variables (given by miniport) */
	u32	qp_handle_lo; /* QP handle to be written in CQE */
	u32	qp_handle_hi;
	u32	qp_handle_async_lo; /* QP handle to be written in async event */
	u32	qp_handle_async_hi;
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;
	u16	pd;
	u16	dpi;
	u16	sq_cq_id;
	u16	sq_num_pages;
	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
	u8	max_sq_sges;
	u16	rq_cq_id;
	u16	rq_num_pages;
	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
	u16	srq_id;
	u8	stats_queue;
	enum ecore_rdma_qp_type qp_type;
	u16	xrcd_id;
};

struct ecore_rdma_create_qp_out_params {
	/* output variables (given to miniport) */
	u32		qp_id;
	u16		icid;
	void		*rq_pbl_virt;
	dma_addr_t	rq_pbl_phys;
	void		*sq_pbl_virt;
	dma_addr_t	sq_pbl_phys;
};

struct ecore_rdma_modify_qp_in_params {
	/* input variables (given by miniport) */
	u32		modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum ecore_roce_qp_state	new_state;
	u16		pkey;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	u32		dest_qp;
	u16		mtu;
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u32		flow_label; /* ignored in IPv4 */
	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
	u16		udp_src_port; /* RoCEv2 only */

	u16		vlan_id;

	u32		rq_psn;
	u32		sq_psn;
	u8		max_rd_atomic_resp;
	u8		max_rd_atomic_req;
	u32		ack_timeout;
	u8		retry_cnt;
	u8		rnr_retry_cnt;
	u8		min_rnr_nak_timer;
	bool		sqd_async;
	u8		remote_mac_addr[6];
	u8		local_mac_addr[6];
	bool		use_local_mac;
	enum roce_mode	roce_mode;
};
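
/* Illustrative sketch (not part of the API): moving a QP to RTR while also
 * setting the destination QP and RQ PSN. The modify_flags bits are assumed to
 * mark which fields of this structure are valid for the call, per the VALID_*
 * names above; 'remote_qp_id' is a hypothetical value.
 *
 *	struct ecore_rdma_modify_qp_in_params qp_params = {0};
 *
 *	qp_params.new_state = ECORE_ROCE_QP_STATE_RTR;
 *	qp_params.dest_qp = remote_qp_id;
 *	qp_params.rq_psn = 0;
 *	qp_params.modify_flags =
 *		(1 << ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
 *		(1 << ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT) |
 *		(1 << ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT);
 *	ecore_rdma_modify_qp(rdma_cxt, qp, &qp_params);
 */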

struct ecore_rdma_query_qp_out_params {
	/* output variables (given to miniport) */
	enum ecore_roce_qp_state	state;
	u32		rq_psn; /* responder */
	u32		sq_psn; /* requester */
	bool		draining; /* send queue is draining */
	u16		mtu;
	u32		dest_qp;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	u32		flow_label; /* ignored in IPv4 */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u32		timeout;
	u8		rnr_retry;
	u8		retry_cnt;
	u8		min_rnr_nak_timer;
	u16		pkey_index;
	u8		max_rd_atomic;
	u8		max_dest_rd_atomic;
	bool		sqd_async;
};

struct ecore_rdma_destroy_qp_out_params {
	u32		sq_cq_prod;
	u32		rq_cq_prod;
};

struct ecore_rdma_create_srq_out_params {
	u16 srq_id;
};

struct ecore_rdma_destroy_srq_in_params {
	u16 srq_id;
	bool is_xrc;
};

struct ecore_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
	bool is_xrc;
};
#endif

struct ecore_rdma_resize_cq_out_params {
	/* output variables, provided to the upper layer */
	u32 prod; /* CQ producer value on old PBL */
	u32 cons; /* CQ consumer value on old PBL */
};

struct ecore_rdma_resize_cnq_in_params {
	/* input variables (given by miniport) */
	u32	cnq_id;
	u32	pbl_page_size_log; /* for the pages that contain the
			* pointers to the cnq pages
			*/
	u64	pbl_ptr;
};

#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
	u64	sent_bytes;
	u64	sent_pkts;
	u64	rcv_bytes;
	u64	rcv_pkts;

	/* RoCE only */
	u64	icrc_errors;		/* wraps at 32 bits */
	u64	retransmit_events;	/* wraps at 32 bits */
	u64	silent_drops;		/* wraps at 16 bits */
	u64	rnr_nacks_sent;		/* wraps at 16 bits */

	/* RoCE DCQCN */
	u64	ecn_pkt_rcv;
	u64	cnp_pkt_rcv;
	u64	cnp_pkt_sent;

	/* iWARP only */
	u64	iwarp_tx_fast_rxmit_cnt;
	u64	iwarp_tx_slow_start_cnt;
	u64	unalign_rx_comp;
};

struct ecore_rdma_counters_out_params {
	u64	pd_count;
	u64	max_pd;
	u64	dpi_count;
	u64	max_dpi;
	u64	cq_count;
	u64	max_cq;
	u64	qp_count;
	u64	max_qp;
	u64	tid_count;
	u64	max_tid;
	u64	srq_count;
	u64	max_srq;
	u64	xrc_srq_count;
	u64	max_xrc_srq;
	u64	xrcd_count;
	u64	max_xrcd;
};
#endif

enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
		    struct ecore_rdma_add_user_out_params *out_params);

enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
		    u16	*pd);

enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
		     u32 *tid);

enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
		     struct ecore_rdma_create_cq_in_params *params,
		     u16 *icid);

/* Returns a pointer to the responder's CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp,
		      struct ecore_rdma_destroy_qp_out_params *out_params);

enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

enum _ecore_status_t
ecore_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id);

void
ecore_rdma_free_xrcd(void  *rdma_cxt, u16 xrcd_id);

void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

/* Returns the CQ CID or zero in case of failure */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling ecore_rdma_start, the upper layer (VBD/qed) should fill in
 * the page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);
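
/* Illustrative sketch (not part of the API): a minimal start sequence with a
 * single CNQ. 'my_events', 'port_mac' and 'cnq_pbl_phys' are hypothetical
 * caller-owned objects/addresses.
 *
 *	struct ecore_rdma_start_in_params start = {0};
 *
 *	start.events = &my_events;
 *	start.desired_cnq = 1;
 *	start.max_mtu = 1500;
 *	memcpy(start.mac_addr, port_mac, ETH_ALEN);
 *	start.cnq_pbl_list[0].pbl_ptr = cnq_pbl_phys;
 *	start.cnq_pbl_list[0].num_pbl_pages = 1;
 *	ecore_rdma_start(p_hwfn, &start);
 */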

enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_params);

enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_params);

u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id);

#ifndef LINUX_REMOVE
u32 ecore_rdma_query_cau_timer_res(void);
#endif

void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
		      struct ecore_rdma_create_srq_in_params *in_params,
		      struct ecore_rdma_create_srq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
		       struct ecore_rdma_destroy_srq_in_params *in_params);

enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
		      struct ecore_rdma_modify_srq_in_params *in_params);

#ifdef CONFIG_ECORE_IWARP

/* iWARP API */

#ifndef __EXTRACT__LINUX__

enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ack on MPA response)
					     */
	ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP, /* Passive side will drop
					      * MPA requests
					      */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	/* Slow/Error path events start from here */
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_ERROR_EVENTS_START = ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};

enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};

struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);

/* Active side connect flow:
 * The upper layer driver calls ecore_iwarp_connect. The function is blocking,
 * i.e. it returns after the TCP connection is established. Once the MPA
 * connection is established, the ECORE_IWARP_EVENT_ACTIVE_COMPLETE event is
 * passed to the upper layer driver through the event_cb provided in
 * ecore_iwarp_connect_in. Information about the established connection is
 * provided in the event data.
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	void *ep_context;
};

/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen. Once a SYN packet
 * arrives that matches an IP/port being listened on, ecore offloads the TCP
 * connection. After an MPA request is received on the offloaded connection,
 * the ECORE_IWARP_EVENT_MPA_REQUEST event is sent to the upper layer driver
 * through the event_cb passed below, with the details placed in the event
 * parameter. After the upper layer driver processes the event,
 * ecore_iwarp_accept or ecore_iwarp_reject should be called to continue the
 * MPA negotiation. Once negotiation is complete, the
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE event is passed to the event_cb
 * originally provided in the ecore_iwarp_listen_in structure.
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};

struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data;
	u16 private_data_len;
};

struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};

#endif

enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);
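
/* Illustrative sketch (not part of the API): active-side connect per the flow
 * comment above. 'my_event_cb', 'my_ctx' and the addressing values are
 * hypothetical; ECORE_IWARP_EVENT_ACTIVE_COMPLETE is later delivered through
 * my_event_cb.
 *
 *	struct ecore_iwarp_connect_in in_params = {0};
 *	struct ecore_iwarp_connect_out out_params;
 *
 *	in_params.event_cb = my_event_cb;
 *	in_params.cb_context = my_ctx;
 *	in_params.qp = qp;
 *	in_params.cm_info.ip_version = ECORE_TCP_IPV4;
 *	in_params.cm_info.remote_ip[0] = remote_ipv4;
 *	in_params.cm_info.remote_port = 4321;
 *	in_params.mss = 1460;
 *	ecore_iwarp_connect(rdma_cxt, &in_params, &out_params);
 */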

enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);
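
/* Illustrative sketch (not part of the API): passive-side listen per the flow
 * comment above. The callback later receives ECORE_IWARP_EVENT_MPA_REQUEST,
 * after which ecore_iwarp_accept() or ecore_iwarp_reject() is called;
 * 'my_event_cb', 'my_ctx' and 'local_ipv4' are hypothetical.
 *
 *	struct ecore_iwarp_listen_in in_params = {0};
 *	struct ecore_iwarp_listen_out out_params;
 *
 *	in_params.event_cb = my_event_cb;
 *	in_params.cb_context = my_ctx;
 *	in_params.max_backlog = ECORE_IWARP_MAX_LIS_BACKLOG;
 *	in_params.ip_version = ECORE_TCP_IPV4;
 *	in_params.ip_addr[0] = local_ipv4;
 *	in_params.port = 4321;
 *	ecore_iwarp_create_listen(rdma_cxt, &in_params, &out_params);
 */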

enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

enum _ecore_status_t
ecore_iwarp_pause_listen(void *rdma_cxt, void *handle, bool pause, bool comp);

#endif /* CONFIG_ECORE_IWARP */

#endif