/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_rdma.h 337517 2018-08-09 01:17:35Z davidcs $
 */

#ifndef __ECORE_RDMA_H__
#define __ECORE_RDMA_H__

#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hsi_common.h"
#include "ecore_proto_if.h"
#include "ecore_rdma_api.h"
#include "ecore_dev_api.h"
#include "ecore_roce.h"
#include "ecore_iwarp.h"

/* Constants */

/* HW/FW RoCE Limitations (internal. For external see ecore_rdma_api.h) */
#define ECORE_RDMA_MAX_FMR                    (RDMA_MAX_TIDS) /* 2^17 - 1 */
#define ECORE_RDMA_MAX_P_KEY                  (1)
#define ECORE_RDMA_MAX_WQE                    (0x7FFF) /* 2^15 - 1 */
#define ECORE_RDMA_MAX_SRQ_WQE_ELEM           (0x7FFF) /* 2^15 - 1 */
#define ECORE_RDMA_PAGE_SIZE_CAPS             (0xFFFFF000) /* TODO: > 4k?! */
#define ECORE_RDMA_ACK_DELAY                  (15) /* 131 milliseconds */
#define ECORE_RDMA_MAX_MR_SIZE                (0x10000000000ULL) /* 2^40 */
#define ECORE_RDMA_MAX_CQS                    (RDMA_MAX_CQS) /* 64k */
#define ECORE_RDMA_MAX_MRS                    (RDMA_MAX_TIDS) /* 2^17 - 1 */
/* Add 1 for header element */
#define ECORE_RDMA_MAX_SRQ_ELEM_PER_WQE	      (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define ECORE_RDMA_MAX_SGE_PER_SRQ_WQE	      (RDMA_MAX_SGE_PER_RQ_WQE)
#define ECORE_RDMA_SRQ_WQE_ELEM_SIZE          (16)
#define ECORE_RDMA_MAX_SRQS		      (32 * 1024) /* 32k */

/* Configurable */
/* The max CQE count is derived from the u16/u32 range: it is halved and
 * decremented by 1 to handle wrap-around properly, and then decremented by 1
 * again. The second decrement comes from a requirement to create a chain that
 * is one entry bigger than what the user requested: the CQE size is 32 bytes,
 * but for performance reasons the FW writes in 64-byte chunks. Allocating an
 * extra entry and telling the FW we have one less prevents it from
 * overwriting the first entry on a wrap, i.e. when the FW writes the last
 * entry while the application hasn't yet read the first one.
 */
#define ECORE_RDMA_MAX_CQE_32_BIT             (0x7FFFFFFF - 1)
#define ECORE_RDMA_MAX_CQE_16_BIT             (0x7FFF - 1)
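/* A worked example of the derivation above (illustrative only): in the 16-bit
 * case the u16 range holds 0x10000 values; halving gives 0x8000, subtracting
 * 1 for wrap handling gives 0x7FFF, and subtracting 1 again for the hidden
 * extra entry yields the exposed 0x7FFF - 1. The 32-bit case follows the same
 * steps starting from 0x100000000.
 */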

#define ECORE_RDMA_MAX_XRC_SRQS		(RDMA_MAX_XRC_SRQS)

/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
 * SRQs is much smaller so there's no need to have that many domains.
 */
#define ECORE_RDMA_MAX_XRCDS	(OSAL_ROUNDUP_POW_OF_TWO(RDMA_MAX_XRC_SRQS))
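
/* For example (illustrative; the actual value of RDMA_MAX_XRC_SRQS comes from
 * the FW interface headers): if RDMA_MAX_XRC_SRQS were 1000,
 * OSAL_ROUNDUP_POW_OF_TWO would round the XRCD count up to 1024, a power of
 * two still far below the 2^16 architectural limit.
 */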

#define IS_IWARP(_p_hwfn) ((_p_hwfn)->p_rdma_info->proto == PROTOCOLID_IWARP)
#define IS_ROCE(_p_hwfn) ((_p_hwfn)->p_rdma_info->proto == PROTOCOLID_ROCE)

enum ecore_rdma_toggle_bit {
	ECORE_RDMA_TOGGLE_BIT_CLEAR = 0,
	ECORE_RDMA_TOGGLE_BIT_SET   = 1
};

/* @@@TBD Currently we support only affiliated events
 * enum ecore_rdma_unaffiliated_event_code {
 *	ECORE_RDMA_PORT_ACTIVE,            // Link up
 *	ECORE_RDMA_PORT_CHANGED,           // SGID table has changed
 *	ECORE_RDMA_LOCAL_CATASTROPHIC_ERR, // Fatal device error
 *	ECORE_RDMA_PORT_ERR,               // Link down
 * };
 */

#define QEDR_MAX_BMAP_NAME	(10)
struct ecore_bmap {
	u32           max_count;
	unsigned long *bitmap;
	char name[QEDR_MAX_BMAP_NAME];
};

struct ecore_rdma_info {
	osal_spinlock_t			lock;

	struct ecore_bmap		cq_map;
	struct ecore_bmap		pd_map;
	struct ecore_bmap		xrcd_map;
	struct ecore_bmap		tid_map;
	struct ecore_bmap		srq_map;
	struct ecore_bmap		xrc_srq_map;
	struct ecore_bmap		qp_map;
	struct ecore_bmap		tcp_cid_map;
	struct ecore_bmap		cid_map;
	struct ecore_bmap		dpi_map;
	struct ecore_bmap		toggle_bits;
	struct ecore_rdma_events	events;
	struct ecore_rdma_device	*dev;
	struct ecore_rdma_port		*port;
	u32				last_tid;
	u8				num_cnqs;
	struct rdma_sent_stats		rdma_sent_pstats;
	struct rdma_rcv_stats		rdma_rcv_tstats;
	u32				num_qps;
	u32				num_mrs;
	u32				num_srqs;
	u16				srq_id_offset;
	u16				queue_zone_base;
	u16				max_queue_zones;

	struct ecore_rdma_glob_cfg	glob_cfg;

	enum protocol_type		proto;
	struct ecore_roce_info		roce;
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_info		iwarp;
#endif
	bool				active;
	int				ref_cnt;
};

struct cq_prod {
	u32	req;
	u32	resp;
};

struct ecore_rdma_qp {
	struct regpair qp_handle;
	struct regpair qp_handle_async;
	u32	qpid; /* iwarp: may differ from icid */
	u16	icid;
	u16	qp_idx;
	enum ecore_roce_qp_state cur_state;
	enum ecore_rdma_qp_type qp_type;
#ifdef CONFIG_ECORE_IWARP
	enum ecore_iwarp_qp_state iwarp_state;
#endif
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;

	bool	incoming_rdma_read_en;
	bool	incoming_rdma_write_en;
	bool	incoming_atomic_en;
	bool	e2e_flow_control_en;

	u16	pd;			/* Protection domain */
	u16	pkey;			/* Primary P_key index */
	u32	dest_qp;
	u16	mtu;
	u16	srq_id;
	u8	traffic_class_tos;	/* IPv6/GRH traffic class; IPv4 TOS */
	u8	hop_limit_ttl;		/* IPv6/GRH hop limit; IPv4 TTL */
	u16	dpi;
	u32	flow_label;		/* ignored in IPv4 */
	u16	vlan_id;
	u32	ack_timeout;
	u8	retry_cnt;
	u8	rnr_retry_cnt;
	u8	min_rnr_nak_timer;
	bool	sqd_async;
	union ecore_gid	sgid;		/* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid;		/* GRH DGID; IPv4/6 Destination IP */
	enum roce_mode roce_mode;
	u16	udp_src_port;		/* RoCEv2 only */
	u8	stats_queue;

	/* requester */
	u8	max_rd_atomic_req;
	u32	sq_psn;
	u16	sq_cq_id; /* The cq to be associated with the send queue */
	u16	sq_num_pages;
	dma_addr_t sq_pbl_ptr;
	void	*orq;
	dma_addr_t orq_phys_addr;
	u8	orq_num_pages;
	bool	req_offloaded;
	bool	has_req;

	/* responder */
	u8	max_rd_atomic_resp;
	u32	rq_psn;
	u16	rq_cq_id; /* The cq to be associated with the receive queue */
	u16	rq_num_pages;
	dma_addr_t rq_pbl_ptr;
	void	*irq;
	dma_addr_t irq_phys_addr;
	u8	irq_num_pages;
	bool	resp_offloaded;
	bool	has_resp;
	struct cq_prod	cq_prod;

	u8	remote_mac_addr[6];
	u8	local_mac_addr[6];

	void	*shared_queue;
	dma_addr_t shared_queue_phys_addr;
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_ep *ep;
#endif

	u16 xrcd_id;
};

static OSAL_INLINE bool ecore_rdma_is_xrc_qp(struct ecore_rdma_qp *qp)
{
	return ((qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT) ||
		(qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI));
}
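
/* Illustrative only (not part of the driver): protocol- and QP-type-specific
 * paths typically branch on the helpers above, e.g.
 *
 *	if (IS_ROCE(p_hwfn) && !ecore_rdma_is_xrc_qp(qp))
 *		handle_regular_roce_qp(p_hwfn, qp);
 *
 * where handle_regular_roce_qp() is a hypothetical caller-side function.
 */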

enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
		      struct ecore_bmap *bmap,
		      u32 max_count,
		      char *name);

void
ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
		     struct ecore_bmap *bmap,
		     bool check);

enum _ecore_status_t
ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
			 struct ecore_bmap *bmap,
			 u32 *id_num);

void
ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
		  struct ecore_bmap *bmap,
		  u32 id_num);

void
ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
		      struct ecore_bmap *bmap,
		      u32 id_num);

int
ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
		   struct ecore_bmap *bmap,
		   u32 id_num);
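
/* A minimal usage sketch for the bitmap API above (illustrative only: the
 * bitmap name is made up, and the meaning of the 'check' flag on free is an
 * assumption based on its name):
 *
 *	struct ecore_bmap bmap;
 *	u32 id;
 *
 *	if (ecore_rdma_bmap_alloc(p_hwfn, &bmap, 128, "example") !=
 *	    ECORE_SUCCESS)
 *		return ECORE_NOMEM;
 *	if (ecore_rdma_bmap_alloc_id(p_hwfn, &bmap, &id) == ECORE_SUCCESS) {
 *		// ... use id, e.g. as a CID or TID ...
 *		ecore_bmap_release_id(p_hwfn, &bmap, id);
 *	}
 *	ecore_rdma_bmap_free(p_hwfn, &bmap, true); // presumably warns on leaks
 */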

void
ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac);
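/* Note (an assumption, not stated in this header): ecore_rdma_set_fw_mac()
 * above converts the byte-wise MAC used by ecore into the 16-bit-word layout
 * the FW expects; the exact packing lives in the implementation file.
 */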

bool
ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn);

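/* Maps a driver-relative SRQ id to the id the FW uses. Presumably (an
 * assumption based on the srq_id_offset field above) regular SRQ ids are
 * offset past the XRC SRQ range while XRC SRQ ids are passed through.
 */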
u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc);

#endif /*__ECORE_RDMA_H__*/