/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_rdma.h 337517 2018-08-09 01:17:35Z davidcs $
 */

#ifndef __ECORE_RDMA_H__
#define __ECORE_RDMA_H__

#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hsi_common.h"
#include "ecore_proto_if.h"
#include "ecore_rdma_api.h"
#include "ecore_dev_api.h"
#include "ecore_roce.h"
#include "ecore_iwarp.h"

/* Constants */

/* HW/FW RoCE limitations (internal; for external limits see ecore_rdma_api.h) */
#define ECORE_RDMA_MAX_FMR                    (RDMA_MAX_TIDS) /* 2^17 - 1 */
#define ECORE_RDMA_MAX_P_KEY                  (1)
#define ECORE_RDMA_MAX_WQE                    (0x7FFF) /* 2^15 - 1 */
#define ECORE_RDMA_MAX_SRQ_WQE_ELEM           (0x7FFF) /* 2^15 - 1 */
#define ECORE_RDMA_PAGE_SIZE_CAPS             (0xFFFFF000) /* TODO: > 4k?! */
#define ECORE_RDMA_ACK_DELAY                  (15) /* 131 milliseconds */
#define ECORE_RDMA_MAX_MR_SIZE                (0x10000000000ULL) /* 2^40 */
#define ECORE_RDMA_MAX_CQS                    (RDMA_MAX_CQS) /* 64k */
#define ECORE_RDMA_MAX_MRS                    (RDMA_MAX_TIDS) /* 2^17 - 1 */
/* Add 1 for header element */
#define ECORE_RDMA_MAX_SRQ_ELEM_PER_WQE	      (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define ECORE_RDMA_MAX_SGE_PER_SRQ_WQE	      (RDMA_MAX_SGE_PER_RQ_WQE)
#define ECORE_RDMA_SRQ_WQE_ELEM_SIZE          (16)
#define ECORE_RDMA_MAX_SRQS		      (32 * 1024) /* 32k */

/* Configurable */
/* Max CQE is derived from the u16/u32 size, halved and decremented by 1 to
 * handle the wrap properly, and then decremented by 1 again. The latter
 * decrement comes from a requirement to create a chain that is bigger than
 * what the user requested by one:
 * the CQE size is 32 bytes but the FW writes in chunks of 64 bytes, for
 * performance purposes. Allocating an extra entry and telling the FW we
 * have less prevents overwriting the first entry in case of a wrap, i.e.
 * when the FW writes the last entry and the application hasn't read the
 * first one.
 */
#define ECORE_RDMA_MAX_CQE_32_BIT             (0x7FFFFFFF - 1)
#define ECORE_RDMA_MAX_CQE_16_BIT             (0x7FFF - 1)
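
/* Illustrative sketch of the sizing rule described above (the helper name
 * and the clamping policy are assumptions, not part of the ecore API): the
 * caller allocates one entry beyond the user's request and advertises the
 * smaller count to the FW.
 */
#if 0
static u32 example_cq_chain_entries(u32 n_user_cqes)
{
	/* clamp to the largest count the 32-bit producer allows */
	if (n_user_cqes > ECORE_RDMA_MAX_CQE_32_BIT)
		n_user_cqes = ECORE_RDMA_MAX_CQE_32_BIT;

	/* allocate one extra entry; the FW is still told n_user_cqes, so
	 * its 64-byte write at the wrap point cannot clobber an unread
	 * 32-byte CQE.
	 */
	return n_user_cqes + 1;
}
#endif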

#define ECORE_RDMA_MAX_XRC_SRQS		(RDMA_MAX_XRC_SRQS)

/* Up to 2^16 XRC Domains are supported, but the actual number of supported
 * XRC SRQs is much smaller so there's no need to have that many domains.
 */
#define ECORE_RDMA_MAX_XRCDS	(OSAL_ROUNDUP_POW_OF_TWO(RDMA_MAX_XRC_SRQS))

#define IS_IWARP(_p_hwfn) (_p_hwfn->p_rdma_info->proto == PROTOCOLID_IWARP)
#define IS_ROCE(_p_hwfn) (_p_hwfn->p_rdma_info->proto == PROTOCOLID_ROCE)

enum ecore_rdma_toggle_bit {
	ECORE_RDMA_TOGGLE_BIT_CLEAR = 0,
	ECORE_RDMA_TOGGLE_BIT_SET   = 1
};

/* @@@TBD Currently we support only affiliated events
 * enum ecore_rdma_unaffiliated_event_code {
 *	ECORE_RDMA_PORT_ACTIVE, // Link Up
 *	ECORE_RDMA_PORT_CHANGED, // SGID table has changed
 *	ECORE_RDMA_LOCAL_CATASTROPHIC_ERR, // Fatal device error
 *	ECORE_RDMA_PORT_ERR, // Link down
 * };
 */

#define QEDR_MAX_BMAP_NAME	(10)
struct ecore_bmap {
	u32		max_count;
	unsigned long	*bitmap;
	char		name[QEDR_MAX_BMAP_NAME];
};

struct ecore_rdma_info {
	osal_spinlock_t			lock;

	struct ecore_bmap		cq_map;
	struct ecore_bmap		pd_map;
	struct ecore_bmap		xrcd_map;
	struct ecore_bmap		tid_map;
	struct ecore_bmap		srq_map;
	struct ecore_bmap		xrc_srq_map;
	struct ecore_bmap		qp_map;
	struct ecore_bmap		tcp_cid_map;
	struct ecore_bmap		cid_map;
	struct ecore_bmap		dpi_map;
	struct ecore_bmap		toggle_bits;
	struct ecore_rdma_events	events;
	struct ecore_rdma_device	*dev;
	struct ecore_rdma_port		*port;
	u32				last_tid;
	u8				num_cnqs;
	struct rdma_sent_stats		rdma_sent_pstats;
	struct rdma_rcv_stats		rdma_rcv_tstats;
	u32				num_qps;
	u32				num_mrs;
	u32				num_srqs;
	u16				srq_id_offset;
	u16				queue_zone_base;
	u16				max_queue_zones;

	struct ecore_rdma_glob_cfg	glob_cfg;

	enum protocol_type		proto;
	struct ecore_roce_info		roce;
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_info		iwarp;
#endif
	bool				active;
	int				ref_cnt;
};

struct cq_prod {
	u32	req;
	u32	resp;
};

struct ecore_rdma_qp {
	struct regpair qp_handle;
	struct regpair qp_handle_async;
	u32	qpid; /* iwarp: may differ from icid */
	u16	icid;
	u16	qp_idx;
	enum ecore_roce_qp_state cur_state;
	enum ecore_rdma_qp_type qp_type;
#ifdef CONFIG_ECORE_IWARP
	enum ecore_iwarp_qp_state iwarp_state;
#endif
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;

	bool	incoming_rdma_read_en;
	bool	incoming_rdma_write_en;
	bool	incoming_atomic_en;
	bool	e2e_flow_control_en;

	u16	pd;			/* Protection domain */
	u16	pkey;			/* Primary P_key index */
	u32	dest_qp;
	u16	mtu;
	u16	srq_id;
	u8	traffic_class_tos;	/* IPv6/GRH traffic class; IPv4 TOS */
	u8	hop_limit_ttl;		/* IPv6/GRH hop limit; IPv4 TTL */
	u16	dpi;
	u32	flow_label;		/* ignored in IPv4 */
	u16	vlan_id;
	u32	ack_timeout;
	u8	retry_cnt;
	u8	rnr_retry_cnt;
	u8	min_rnr_nak_timer;
	bool	sqd_async;
	union ecore_gid	sgid;		/* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid;		/* GRH DGID; IPv4/6 Destination IP */
	enum roce_mode roce_mode;
	u16	udp_src_port;		/* RoCEv2 only */
	u8	stats_queue;

	/* requester */
	u8	max_rd_atomic_req;
	u32	sq_psn;
	u16	sq_cq_id; /* The CQ to be associated with the send queue */
	u16	sq_num_pages;
	dma_addr_t sq_pbl_ptr;
	void	*orq;
	dma_addr_t orq_phys_addr;
	u8	orq_num_pages;
	bool	req_offloaded;
	bool	has_req;

	/* responder */
	u8	max_rd_atomic_resp;
	u32	rq_psn;
	u16	rq_cq_id; /* The CQ to be associated with the receive queue */
	u16	rq_num_pages;
	dma_addr_t rq_pbl_ptr;
	void	*irq;
	dma_addr_t irq_phys_addr;
	u8	irq_num_pages;
	bool	resp_offloaded;
	bool	has_resp;
	struct cq_prod	cq_prod;

	u8	remote_mac_addr[6];
	u8	local_mac_addr[6];

	void	*shared_queue;
	dma_addr_t shared_queue_phys_addr;
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_ep *ep;
#endif

	u16	xrcd_id;
};

static OSAL_INLINE bool ecore_rdma_is_xrc_qp(struct ecore_rdma_qp *qp)
{
	return (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT) ||
	       (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI);
}
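
/* Illustrative only (a hypothetical helper, assumed from the field names
 * above rather than taken from the driver): XRC SRQ ids come out of the
 * dedicated xrc_srq_map bitmap while regular SRQ ids come out of srq_map,
 * so a caller can key off ecore_rdma_is_xrc_qp() to pick the right bitmap.
 */
#if 0
static OSAL_INLINE struct ecore_bmap *
example_srq_bmap(struct ecore_hwfn *p_hwfn, struct ecore_rdma_qp *qp)
{
	return ecore_rdma_is_xrc_qp(qp) ? &p_hwfn->p_rdma_info->xrc_srq_map
					: &p_hwfn->p_rdma_info->srq_map;
}
#endif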

enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
		      struct ecore_bmap *bmap,
		      u32 max_count,
		      char *name);

void
ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
		     struct ecore_bmap *bmap,
		     bool check);

enum _ecore_status_t
ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
			 struct ecore_bmap *bmap,
			 u32 *id_num);

void
ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
		  struct ecore_bmap *bmap,
		  u32 id_num);

void
ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
		      struct ecore_bmap *bmap,
		      u32 id_num);

int
ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
		   struct ecore_bmap *bmap,
		   u32 id_num);
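
/* Illustrative usage of the bmap id-allocator API above (a minimal sketch;
 * the helper name is hypothetical, and serializing allocation with
 * p_rdma_info->lock is an assumption about the caller's convention).
 */
#if 0
static enum _ecore_status_t
example_acquire_pd(struct ecore_hwfn *p_hwfn, u32 *pd_id)
{
	enum _ecore_status_t rc;

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	/* find-and-set the first free id in the PD bitmap */
	rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map,
				      pd_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	return rc;
}
#endif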

void
ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac);

bool
ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn);

u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc);
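
/* A plausible sketch of the id translation above (an assumption, not the
 * verified implementation): with XRC and regular SRQs sharing one FW id
 * space, a regular SRQ id would be shifted by srq_id_offset while an XRC
 * SRQ id maps through directly.
 */
#if 0
u16 example_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc)
{
	return is_xrc ? id : id + p_hwfn->p_rdma_info->srq_id_offset;
}
#endif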

#endif /*__ECORE_RDMA_H__*/