1/*
2 * Copyright (c) 2018-2019 Cavium, Inc.
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
29 * File : ecore_rdma.c
30 */
31#include <sys/cdefs.h>
32#include "bcm_osal.h"
33#include "ecore.h"
34#include "ecore_status.h"
35#include "ecore_sp_commands.h"
36#include "ecore_cxt.h"
37#include "ecore_rdma.h"
38#include "reg_addr.h"
39#include "ecore_rt_defs.h"
40#include "ecore_init_ops.h"
41#include "ecore_hw.h"
42#include "ecore_mcp.h"
43#include "ecore_init_fw_funcs.h"
44#include "ecore_int.h"
45#include "pcics_reg_driver.h"
46#include "ecore_iro.h"
47#include "ecore_gtt_reg_addr.h"
48#include "ecore_hsi_iwarp.h"
49#include "ecore_ll2.h"
50#include "ecore_ooo.h"
51#ifndef LINUX_REMOVE
52#include "ecore_tcp_ip.h"
53#endif
54
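/* Allocate a zeroed ID bitmap of max_count bits. The size is rounded up to a
 * whole number of unsigned longs; e.g. with 64-bit longs, max_count = 288
 * needs DIV_ROUND_UP(288, 64) = 5 longs, i.e. 40 bytes. A max_count of zero
 * is legal and simply leaves bmap->bitmap as OSAL_NULL.
 */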
55enum _ecore_status_t ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
56					   struct ecore_bmap *bmap,
57					   u32		    max_count,
58					   char              *name)
59{
60	u32 size_in_bytes;
61
62	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "max_count = %08x\n", max_count);
63
64	bmap->max_count = max_count;
65
66	if (!max_count) {
67		bmap->bitmap = OSAL_NULL;
68		return ECORE_SUCCESS;
69	}
70
71	size_in_bytes = sizeof(unsigned long) *
72		DIV_ROUND_UP(max_count, (sizeof(unsigned long) * 8));
73
74	bmap->bitmap = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size_in_bytes);
75	if (!bmap->bitmap)
76	{
77		DP_NOTICE(p_hwfn, false,
78			  "ecore bmap alloc failed: cannot allocate memory (bitmap). rc = %d\n",
79			  ECORE_NOMEM);
80		return ECORE_NOMEM;
81	}
82
83	OSAL_SNPRINTF(bmap->name, QEDR_MAX_BMAP_NAME, "%s", name);
84
85	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_SUCCESS\n");
86	return ECORE_SUCCESS;
87}
88
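/* Find the first clear bit in the bitmap, mark it as used and return its
 * index through id_num. Returns ECORE_INVAL when the bitmap is exhausted.
 * Callers in this file serialize access via p_rdma_info->lock.
 */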
89enum _ecore_status_t ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
90					      struct ecore_bmap *bmap,
91					      u32	       *id_num)
92{
93	*id_num = OSAL_FIND_FIRST_ZERO_BIT(bmap->bitmap, bmap->max_count);
94	if (*id_num >= bmap->max_count)
95		return ECORE_INVAL;
96
97	OSAL_SET_BIT(*id_num, bmap->bitmap);
98
99	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: allocated id %d\n",
100		   bmap->name, *id_num);
101
102	return ECORE_SUCCESS;
103}
104
105void ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
106		       struct ecore_bmap *bmap,
107		       u32		id_num)
108{
109	if (id_num >= bmap->max_count) {
110		DP_NOTICE(p_hwfn, true,
111			  "%s bitmap: cannot set id %d max is %d\n",
112			  bmap->name, id_num, bmap->max_count);
113
114		return;
115	}
116
117	OSAL_SET_BIT(id_num, bmap->bitmap);
118
119	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: set id %d\n",
120		   bmap->name, id_num);
121}
122
123void ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
124			   struct ecore_bmap *bmap,
125			   u32		    id_num)
126{
127	bool b_acquired;
128
129	if (id_num >= bmap->max_count)
130		return;
131
132	b_acquired = OSAL_TEST_AND_CLEAR_BIT(id_num, bmap->bitmap);
133	if (!b_acquired)
134	{
135		DP_NOTICE(p_hwfn, false, "%s bitmap: id %d already released\n",
136			  bmap->name, id_num);
137		return;
138	}
139
140	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: released id %d\n",
141		   bmap->name, id_num);
142}
143
144int ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
145		       struct ecore_bmap *bmap,
146		       u32		  id_num)
147{
148	if (id_num >= bmap->max_count) {
149		DP_NOTICE(p_hwfn, true,
150			  "%s bitmap: id %d too high. max is %d\n",
151			  bmap->name, id_num, bmap->max_count);
152		return -1;
153	}
154
155	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: tested id %d\n",
156		   bmap->name, id_num);
157
158	return OSAL_TEST_BIT(id_num, bmap->bitmap);
159}
160
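/* A bitmap is empty when no bit is set, in which case OSAL_FIND_FIRST_BIT
 * returns max_count.
 */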
161static bool ecore_bmap_is_empty(struct ecore_bmap *bmap)
162{
163	return (bmap->max_count ==
164		OSAL_FIND_FIRST_BIT(bmap->bitmap, bmap->max_count));
165}
166
167#ifndef LINUX_REMOVE
168u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id)
169{
	/* The first SB ID for RoCE comes after all the L2 SBs */
171	return FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE) + rel_sb_id;
172}
173
174u32 ecore_rdma_query_cau_timer_res(void)
175{
176	return ECORE_CAU_DEF_RX_TIMER_RES;
177}
178#endif
179
180enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn    *p_hwfn)
181{
182	struct ecore_rdma_info *p_rdma_info;
183
184	p_rdma_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info));
185	if (!p_rdma_info) {
186		DP_NOTICE(p_hwfn, false,
187			  "ecore rdma alloc failed: cannot allocate memory (rdma info).\n");
188		return ECORE_NOMEM;
189	}
190	p_hwfn->p_rdma_info = p_rdma_info;
191
192#ifdef CONFIG_ECORE_LOCK_ALLOC
193	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_rdma_info->lock)) {
194		ecore_rdma_info_free(p_hwfn);
195		return ECORE_NOMEM;
196	}
197#endif
198	OSAL_SPIN_LOCK_INIT(&p_rdma_info->lock);
199
200	return ECORE_SUCCESS;
201}
202
203void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn)
204{
205#ifdef CONFIG_ECORE_LOCK_ALLOC
206	OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_rdma_info->lock);
207#endif
208	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info);
209	p_hwfn->p_rdma_info = OSAL_NULL;
210}
211
212static enum _ecore_status_t ecore_rdma_inc_ref_cnt(struct ecore_hwfn *p_hwfn)
213{
214	enum _ecore_status_t rc = ECORE_INVAL;
215
216	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
217	if (p_hwfn->p_rdma_info->active) {
218		p_hwfn->p_rdma_info->ref_cnt++;
219		rc = ECORE_SUCCESS;
220	} else {
221		DP_INFO(p_hwfn, "Ref cnt requested for inactive rdma\n");
222	}
223	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
224	return rc;
225}
226
227static void ecore_rdma_dec_ref_cnt(struct ecore_hwfn *p_hwfn)
228{
229	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
230	p_hwfn->p_rdma_info->ref_cnt--;
231	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
232}
233
234static void ecore_rdma_activate(struct ecore_hwfn *p_hwfn)
235{
236	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
237	p_hwfn->p_rdma_info->active = true;
238	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
239}
240
/* Part of deactivating RDMA is letting all the relevant flows complete before
 * we start shutting down. Currently the only such flow is query-stats, which
 * can be called from MCP context.
 */
/* The longest time it can take an RDMA flow to complete */
246#define ECORE_RDMA_MAX_FLOW_TIME (100)
247static enum _ecore_status_t ecore_rdma_deactivate(struct ecore_hwfn *p_hwfn)
248{
249	int wait_count;
250
251	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
252	p_hwfn->p_rdma_info->active = false;
253	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
254
	/* We'll give each flow its time to complete... */
256	wait_count = p_hwfn->p_rdma_info->ref_cnt;
257
258	while (p_hwfn->p_rdma_info->ref_cnt) {
259		OSAL_MSLEEP(ECORE_RDMA_MAX_FLOW_TIME);
260		if (--wait_count == 0) {
261			DP_NOTICE(p_hwfn, false,
262				  "Timeout on refcnt=%d\n",
263				  p_hwfn->p_rdma_info->ref_cnt);
264			return ECORE_TIMEOUT;
265		}
266	}
267	return ECORE_SUCCESS;
268}
269
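/* Allocate all SW resources needed for RDMA: the device and port parameter
 * structs and the ID bitmaps (PD, XRCD, DPI, CQ, toggle bits, MR/TID, QP,
 * real CID, XRC SRQ and SRQ), plus the iWARP-specific resources when the
 * personality is iWARP.
 */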
270static enum _ecore_status_t ecore_rdma_alloc(struct ecore_hwfn *p_hwfn)
271{
272	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
273	u32 num_cons, num_tasks;
274	enum _ecore_status_t rc;
275
276	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocating RDMA\n");
277
278	if (!p_rdma_info)
279		return ECORE_INVAL;
280
281	if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_IWARP)
282		p_rdma_info->proto = PROTOCOLID_IWARP;
283	else
284		p_rdma_info->proto = PROTOCOLID_ROCE;
285
286	num_cons = ecore_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
287						 OSAL_NULL);
288
289	if (IS_IWARP(p_hwfn))
290		p_rdma_info->num_qps = num_cons;
291	else
292		p_rdma_info->num_qps = num_cons / 2;
293
294	/* INTERNAL: RoCE & iWARP use the same taskid */
295	num_tasks = ecore_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
296
297	/* Each MR uses a single task */
298	p_rdma_info->num_mrs = num_tasks;
299
300	/* Queue zone lines are shared between RoCE and L2 in such a way that
301	 * they can be used by each without obstructing the other.
302	 */
303	p_rdma_info->queue_zone_base = (u16) RESC_START(p_hwfn, ECORE_L2_QUEUE);
304	p_rdma_info->max_queue_zones = (u16) RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
305
306	/* Allocate a struct with device params and fill it */
307	p_rdma_info->dev = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->dev));
308	if (!p_rdma_info->dev)
309	{
310		rc = ECORE_NOMEM;
311		DP_NOTICE(p_hwfn, false,
312			  "ecore rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
313			  rc);
314		return rc;
315	}
316
317	/* Allocate a struct with port params and fill it */
318	p_rdma_info->port = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->port));
319	if (!p_rdma_info->port)
320	{
321		DP_NOTICE(p_hwfn, false,
322			  "ecore rdma alloc failed: cannot allocate memory (rdma info port)\n");
323		return ECORE_NOMEM;
324	}
325
326	/* Allocate bit map for pd's */
327	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
328				   "PD");
329	if (rc != ECORE_SUCCESS)
330	{
331		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
332			   "Failed to allocate pd_map,rc = %d\n",
333			   rc);
334		return rc;
335	}
336
337	/* Allocate bit map for XRC Domains */
338	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
339				   ECORE_RDMA_MAX_XRCDS, "XRCD");
340	if (rc != ECORE_SUCCESS)
341	{
342		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
343			   "Failed to allocate xrcd_map,rc = %d\n",
344			   rc);
345		return rc;
346	}
347
348	/* Allocate DPI bitmap */
349	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
350				   p_hwfn->dpi_count, "DPI");
351	if (rc != ECORE_SUCCESS)
352	{
353		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
354			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
355		return rc;
356	}
357
	/* Allocate bitmap for CQs. The maximum number of CQs is bounded by
	 * twice the number of QPs.
	 */
361	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
362				   num_cons, "CQ");
363	if (rc != ECORE_SUCCESS)
364	{
365		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
366			   "Failed to allocate cq bitmap, rc = %d\n", rc);
367		return rc;
368	}
369
370	/* Allocate bitmap for toggle bit for cq icids
371	 * We toggle the bit every time we create or resize cq for a given icid.
372	 * The maximum number of CQs is bounded to the number of connections we
373	 * support. (num_qps in iWARP or num_qps/2 in RoCE).
374	 */
375	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
376				   num_cons, "Toggle");
377	if (rc != ECORE_SUCCESS)
378	{
379		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
381		return rc;
382	}
383
384	/* Allocate bitmap for itids */
385	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
386				   p_rdma_info->num_mrs, "MR");
387	if (rc != ECORE_SUCCESS)
388	{
389		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
390			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
391		return rc;
392	}
393
394	/* Allocate bitmap for qps. */
395	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->qp_map,
396				   p_rdma_info->num_qps, "QP");
397	if (rc != ECORE_SUCCESS)
398	{
399		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
400			   "Failed to allocate qp bitmap, rc = %d\n", rc);
401		return rc;
402	}
403
404	/* Allocate bitmap for cids used for responders/requesters. */
405	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
406				   "REAL CID");
407	if (rc != ECORE_SUCCESS)
408	{
409		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
410			   "Failed to allocate cid bitmap, rc = %d\n", rc);
411		return rc;
412	}
413
414	/* The first SRQ follows the last XRC SRQ. This means that the
	 * SRQ IDs start from an offset equal to max_xrc_srqs.
416	 */
417	p_rdma_info->srq_id_offset = (u16)ecore_cxt_get_xrc_srq_count(p_hwfn);
418	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrc_srq_map,
419				   p_rdma_info->srq_id_offset, "XRC SRQ");
420	if (rc != ECORE_SUCCESS) {
421		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
422			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
423		return rc;
424	}
425
426	/* Allocate bitmap for srqs */
427	p_rdma_info->num_srqs = ecore_cxt_get_srq_count(p_hwfn);
428	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
429				   p_rdma_info->num_srqs,
430				   "SRQ");
431	if (rc != ECORE_SUCCESS) {
432		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
433			   "Failed to allocate srq bitmap, rc = %d\n", rc);
434
435		return rc;
436	}
437
438	if (IS_IWARP(p_hwfn))
439		rc = ecore_iwarp_alloc(p_hwfn);
440
441	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
442
443	return rc;
444}
445
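/* Free an ID bitmap. When 'check' is set, warn if any IDs are still in use
 * and dump the non-empty parts of the bitmap, 512 bits (eight 64-bit words)
 * per printed line.
 */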
446void ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
447			  struct ecore_bmap *bmap,
448			  bool check)
449{
450	int weight, line, item, last_line, last_item;
451	u64 *pmap;
452
453	if (!bmap || !bmap->bitmap)
454		return;
455
456	if (!check)
457		goto end;
458
459	weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
460	if (!weight)
461		goto end;
462
463	DP_NOTICE(p_hwfn, false,
464		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
465		  bmap->name, bmap->max_count, weight);
466
467	pmap = (u64 *)bmap->bitmap;
468	last_line = bmap->max_count / (64*8);
469	last_item = last_line * 8 + (((bmap->max_count % (64*8)) + 63) / 64);
470
471	/* print aligned non-zero lines, if any */
472	for (item = 0, line = 0; line < last_line; line++, item += 8) {
473		if (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item], 64*8))
474			DP_NOTICE(p_hwfn, false,
475				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
476				  line, (unsigned long long)pmap[item],
477				(unsigned long long)pmap[item+1],
478				(unsigned long long)pmap[item+2],
479				  (unsigned long long)pmap[item+3],
480				(unsigned long long)pmap[item+4],
481				(unsigned long long)pmap[item+5],
482				  (unsigned long long)pmap[item+6],
483				(unsigned long long)pmap[item+7]);
484	}
485
486	/* print last unaligned non-zero line, if any */
487	if ((bmap->max_count % (64*8)) &&
488	    (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item],
489				bmap->max_count-item*64))) {
490		u8 str_last_line[200] = { 0 };
491		int  offset;
492
493		offset = OSAL_SPRINTF(str_last_line, "line 0x%04x: ", line);
494		for (; item < last_item; item++) {
495			offset += OSAL_SPRINTF(str_last_line+offset,
496					       "0x%016llx ",
497				(unsigned long long)pmap[item]);
498		}
499		DP_NOTICE(p_hwfn, false, "%s\n", str_last_line);
500	}
501
502end:
503	OSAL_FREE(p_hwfn->p_dev, bmap->bitmap);
504	bmap->bitmap = OSAL_NULL;
505}
506
507void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn)
508{
509	if (IS_IWARP(p_hwfn))
510		ecore_iwarp_resc_free(p_hwfn);
511
512	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
513	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->qp_map, 1);
514	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
515	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
516	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
517	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
518	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
519	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
520	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
521	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
522
523	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->port);
524	p_hwfn->p_rdma_info->port = OSAL_NULL;
525
526	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->dev);
527	p_hwfn->p_rdma_info->dev = OSAL_NULL;
528}
529
530static OSAL_INLINE void ecore_rdma_free_reserved_lkey(struct ecore_hwfn *p_hwfn)
531{
532	ecore_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
533}
534
535static void ecore_rdma_free_ilt(struct ecore_hwfn *p_hwfn)
536{
537	/* Free Connection CXT */
538	ecore_cxt_free_ilt_range(
539		p_hwfn, ECORE_ELEM_CXT,
540		ecore_cxt_get_proto_cid_start(p_hwfn,
541					      p_hwfn->p_rdma_info->proto),
542		ecore_cxt_get_proto_cid_count(p_hwfn,
543					      p_hwfn->p_rdma_info->proto,
544					      OSAL_NULL));
545
	/* Free Task CXT (intentionally RoCE, as the task-id is shared between
	 * RoCE and iWARP)
	 */
549	ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
550				 ecore_cxt_get_proto_tid_count(
551					 p_hwfn, PROTOCOLID_ROCE));
552
553	/* Free TSDM CXT */
554	ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
555				 ecore_cxt_get_srq_count(p_hwfn));
556}
557
558static void ecore_rdma_free(struct ecore_hwfn *p_hwfn)
559{
560	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");
561
562	ecore_rdma_free_reserved_lkey(p_hwfn);
563
564	ecore_rdma_resc_free(p_hwfn);
565
566	ecore_rdma_free_ilt(p_hwfn);
567}
568
569static void ecore_rdma_get_guid(struct ecore_hwfn *p_hwfn, u8 *guid)
570{
571	u8 mac_addr[6];
572
573	OSAL_MEMCPY(&mac_addr[0], &p_hwfn->hw_info.hw_mac_addr[0], ETH_ALEN);
574	guid[0] = mac_addr[0] ^ 2;
575	guid[1] = mac_addr[1];
576	guid[2] = mac_addr[2];
577	guid[3] = 0xff;
578	guid[4] = 0xfe;
579	guid[5] = mac_addr[3];
580	guid[6] = mac_addr[4];
581	guid[7] = mac_addr[5];
582}
583
584static void ecore_rdma_init_events(
585	struct ecore_hwfn *p_hwfn,
586	struct ecore_rdma_start_in_params *params)
587{
588	struct ecore_rdma_events *events;
589
590	events = &p_hwfn->p_rdma_info->events;
591
592	events->unaffiliated_event = params->events->unaffiliated_event;
593	events->affiliated_event = params->events->affiliated_event;
594	events->context = params->events->context;
595}
596
597static void ecore_rdma_init_devinfo(
598	struct ecore_hwfn *p_hwfn,
599	struct ecore_rdma_start_in_params *params)
600{
601	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
602	u32 pci_status_control;
603
604	/* Vendor specific information */
605	dev->vendor_id = p_hwfn->p_dev->vendor_id;
606	dev->vendor_part_id = p_hwfn->p_dev->device_id;
607	dev->hw_ver = 0;
608	dev->fw_ver = STORM_FW_VERSION;
609
610	ecore_rdma_get_guid(p_hwfn, (u8 *)(&dev->sys_image_guid));
611	dev->node_guid = dev->sys_image_guid;
612
613	dev->max_sge = OSAL_MIN_T(u32, RDMA_MAX_SGE_PER_SQ_WQE,
614				  RDMA_MAX_SGE_PER_RQ_WQE);
615
616	if (p_hwfn->p_dev->rdma_max_sge) {
617		dev->max_sge = OSAL_MIN_T(u32,
618				     p_hwfn->p_dev->rdma_max_sge,
619				     dev->max_sge);
620	}
621
	/* Set these values according to configuration.
	 * The max SGE for SRQ is not defined by the FW for now,
	 * so define it in the driver.
	 * TODO: Get this value from FW.
	 */
627	dev->max_srq_sge = ECORE_RDMA_MAX_SGE_PER_SRQ_WQE;
628	if (p_hwfn->p_dev->rdma_max_srq_sge) {
629		dev->max_srq_sge = OSAL_MIN_T(u32,
630				     p_hwfn->p_dev->rdma_max_srq_sge,
631				     dev->max_srq_sge);
632	}
633
634	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
635	dev->max_inline = (p_hwfn->p_dev->rdma_max_inline) ?
636		OSAL_MIN_T(u32,
637			   p_hwfn->p_dev->rdma_max_inline,
638			   dev->max_inline) :
639			dev->max_inline;
640
641	dev->max_wqe = ECORE_RDMA_MAX_WQE;
642	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ);
643
	/* The number of QPs may be higher than ECORE_ROCE_MAX_QPS because
	 * it is up-aligned to 16 and then to the ILT page size within ecore cxt.
	 * This is OK in terms of ILT, but we don't want to configure the FW
	 * above its abilities.
	 */
649	dev->max_qp = OSAL_MIN_T(u64, ROCE_MAX_QPS,
650			     p_hwfn->p_rdma_info->num_qps);
651
	/* CQs use the same icids that QPs use, hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
655	dev->max_cq = dev->max_qp * 2;
656
	/* The number of MRs is smaller by 1 since the first one is reserved */
658	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
659	dev->max_mr_size = ECORE_RDMA_MAX_MR_SIZE;
	/* The maximum CQE capacity per CQ supported.
	 * The max number of CQEs assumes a two-layer PBL, where
	 * 8 is the pointer size in bytes and
	 * 32 is the size of a CQ element in bytes.
	 */
665	if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_32_BITS)
666		dev->max_cqe = ECORE_RDMA_MAX_CQE_32_BIT;
667	else
668		dev->max_cqe = ECORE_RDMA_MAX_CQE_16_BIT;
669
670	dev->max_mw = 0;
671	dev->max_fmr = ECORE_RDMA_MAX_FMR;
672	dev->max_mr_mw_fmr_pbl = (OSAL_PAGE_SIZE/8) * (OSAL_PAGE_SIZE/8);
673	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * OSAL_PAGE_SIZE;
674	dev->max_pkey = ECORE_RDMA_MAX_P_KEY;
	/* Right now we don't take any parameters from the user,
	 * so assign the predefined num_srqs to max_srq.
	 */
678	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
679
680	/* SRQ WQE size */
681	dev->max_srq_wr = ECORE_RDMA_MAX_SRQ_WQE_ELEM;
682
683	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
684					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE*2);
685	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
686					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
687
688	dev->max_dev_resp_rd_atomic_resc =
689		dev->max_qp_resp_rd_atomic_resc * p_hwfn->p_rdma_info->num_qps;
690	dev->page_size_caps = ECORE_RDMA_PAGE_SIZE_CAPS;
691	dev->dev_ack_delay = ECORE_RDMA_ACK_DELAY;
692	dev->max_pd = RDMA_MAX_PDS;
693	dev->max_ah = dev->max_qp;
694	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, ECORE_RDMA_STATS_QUEUE);
695
	/* Set capabilities */
697	dev->dev_caps = 0;
698	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RNR_NAK, 1);
699	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
700	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
701	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RESIZE_CQ, 1);
702	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
703	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
704	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ZBVA, 1);
705	SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
706
707	/* Check atomic operations support in PCI configuration space. */
708	OSAL_PCI_READ_CONFIG_DWORD(p_hwfn->p_dev,
709				   PCICFG_DEVICE_STATUS_CONTROL_2,
710				   &pci_status_control);
711
712	if (pci_status_control &
713	    PCICFG_DEVICE_STATUS_CONTROL_2_ATOMIC_REQ_ENABLE)
714		SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ATOMIC_OP, 1);
715
716	if (IS_IWARP(p_hwfn))
717		ecore_iwarp_init_devinfo(p_hwfn);
718}
719
720static void ecore_rdma_init_port(
721	struct ecore_hwfn *p_hwfn)
722{
723	struct ecore_rdma_port *port = p_hwfn->p_rdma_info->port;
724	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
725
726	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
727		ECORE_RDMA_PORT_UP : ECORE_RDMA_PORT_DOWN;
728
729	port->max_msg_size = OSAL_MIN_T(u64,
730				   (dev->max_mr_mw_fmr_size *
731				    p_hwfn->p_dev->rdma_max_sge),
732					((u64)1 << 31));
733
734	port->pkey_bad_counter = 0;
735}
736
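/* Program the parser for RDMA. iWARP has its own init flow; for RoCE this
 * clears the per-PF destination-QP maximum, records the RoCE search register
 * (which is only written once the first cid is allocated) and sets bit 0 of
 * the light-L2 ethertype enable register (cleared again in ecore_rdma_stop).
 */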
737static enum _ecore_status_t ecore_rdma_init_hw(
738	struct ecore_hwfn *p_hwfn,
739	struct ecore_ptt *p_ptt)
740{
741	u32 ll2_ethertype_en;
742
743	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW\n");
744	p_hwfn->b_rdma_enabled_in_prs = false;
745
746	if (IS_IWARP(p_hwfn))
747		return ecore_iwarp_init_hw(p_hwfn, p_ptt);
748
749	ecore_wr(p_hwfn,
750		 p_ptt,
751		 PRS_REG_ROCE_DEST_QP_MAX_PF,
752		 0);
753
754	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
755
756	/* We delay writing to this reg until first cid is allocated. See
757	 * ecore_cxt_dynamic_ilt_alloc function for more details
758	 */
759
760	ll2_ethertype_en = ecore_rd(p_hwfn,
761			     p_ptt,
762			     PRS_REG_LIGHT_L2_ETHERTYPE_EN);
763	ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
764		 (ll2_ethertype_en | 0x01));
765
766#ifndef REAL_ASIC_ONLY
767	if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
768		ecore_wr(p_hwfn,
769			 p_ptt,
770			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
771			 0);
772		ecore_wr(p_hwfn,
773			 p_ptt,
774			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 4,
775			 0);
776	}
777#endif
778
779	if (ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2)
780	{
781		DP_NOTICE(p_hwfn,
782			  true,
			  "The first RoCE CID should be even\n");
784		return ECORE_UNKNOWN_ERROR;
785	}
786
787	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW - Done\n");
788	return ECORE_SUCCESS;
789}
790
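/* Send the RDMA function-init ramrod. Besides the protocol-specific parts
 * (iWARP ramrod data or RoCE DCQCN/LL2 settings), this fills one entry per
 * requested CNQ with its IGU status block, protocol index, PBL and queue
 * zone, and tells the FW where regular SRQ IDs start relative to XRC SRQs.
 */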
791static enum _ecore_status_t
792ecore_rdma_start_fw(struct ecore_hwfn *p_hwfn,
793#ifdef CONFIG_DCQCN
794		    struct ecore_ptt *p_ptt,
795#else
796		    struct ecore_ptt OSAL_UNUSED *p_ptt,
797#endif
798		    struct ecore_rdma_start_in_params *params)
799{
800	struct rdma_init_func_ramrod_data *p_ramrod;
801	struct rdma_init_func_hdr *pheader;
802	struct ecore_rdma_info *p_rdma_info;
803	struct ecore_sp_init_data init_data;
804	struct ecore_spq_entry *p_ent;
805	u16 igu_sb_id, sb_id;
806	u8 ll2_queue_id;
807	u32 cnq_id;
808	enum _ecore_status_t rc;
809
810	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Starting FW\n");
811
812	p_rdma_info = p_hwfn->p_rdma_info;
813
814	/* Save the number of cnqs for the function close ramrod */
815	p_rdma_info->num_cnqs = params->desired_cnq;
816
817	/* Get SPQ entry */
818	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
819	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
820	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
821
822	rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
823				   p_rdma_info->proto, &init_data);
824	if (rc != ECORE_SUCCESS)
825		return rc;
826
827	if (IS_IWARP(p_hwfn)) {
828		ecore_iwarp_init_fw_ramrod(p_hwfn,
829					   &p_ent->ramrod.iwarp_init_func);
830		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
831	} else {
832#ifdef CONFIG_DCQCN
833		rc = ecore_roce_dcqcn_cfg(p_hwfn, &params->roce.dcqcn_params,
834					  &p_ent->ramrod.roce_init_func, p_ptt);
835		if (rc != ECORE_SUCCESS) {
836			DP_NOTICE(p_hwfn, false,
837				  "Failed to configure DCQCN. rc = %d.\n", rc);
838			return rc;
839		}
840#endif
841		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
842
843		/* The ll2_queue_id is used only for UD QPs */
844		ll2_queue_id = ecore_ll2_handle_to_queue_id(
845			p_hwfn, params->roce.ll2_handle);
846		p_ent->ramrod.roce_init_func.roce.ll2_queue_id = ll2_queue_id;
847	}
848
849	pheader = &p_ramrod->params_header;
850	pheader->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);
851	pheader->num_cnqs = params->desired_cnq;
852
853	/* The first SRQ ILT page is used for XRC SRQs and all the following
854	 * pages contain regular SRQs. Hence the first regular SRQ ID is the
	 * maximum number of XRC SRQs.
856	 */
857	pheader->first_reg_srq_id = p_rdma_info->srq_id_offset;
858	pheader->reg_srq_base_addr =
859		ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
860
861	if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_16_BITS)
862		pheader->cq_ring_mode = 1; /* 1=16 bits */
863	else
864		pheader->cq_ring_mode = 0; /* 0=32 bits */
865
866	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++)
867	{
868		sb_id = (u16)OSAL_GET_RDMA_SB_ID(p_hwfn, cnq_id);
869		igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
870		p_ramrod->cnq_params[cnq_id].sb_num =
871			OSAL_CPU_TO_LE16(igu_sb_id);
872
873		p_ramrod->cnq_params[cnq_id].sb_index =
874			p_hwfn->pf_params.rdma_pf_params.gl_pi;
875
876		p_ramrod->cnq_params[cnq_id].num_pbl_pages =
877			params->cnq_pbl_list[cnq_id].num_pbl_pages;
878
879		p_ramrod->cnq_params[cnq_id].pbl_base_addr.hi =
880			DMA_HI_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);
881		p_ramrod->cnq_params[cnq_id].pbl_base_addr.lo =
882			DMA_LO_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);
883
		/* We arbitrarily decide that cnq_id will be used as the qz_offset */
885		p_ramrod->cnq_params[cnq_id].queue_zone_num =
886			OSAL_CPU_TO_LE16(p_rdma_info->queue_zone_base + cnq_id);
887	}
888
889	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
890
891	return rc;
892}
893
894enum _ecore_status_t ecore_rdma_alloc_tid(void	*rdma_cxt,
895					  u32	*itid)
896{
897	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
898	enum _ecore_status_t rc;
899
900	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID\n");
901
902	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
903	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
904				      &p_hwfn->p_rdma_info->tid_map,
905				      itid);
906	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
907	if (rc != ECORE_SUCCESS) {
908		DP_NOTICE(p_hwfn, false, "Failed in allocating tid\n");
909		goto out;
910	}
911
912	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_TASK, *itid);
913out:
914	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
915	return rc;
916}
917
918static OSAL_INLINE enum _ecore_status_t ecore_rdma_reserve_lkey(
919		struct ecore_hwfn *p_hwfn)
920{
921	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
922
923	/* Tid 0 will be used as the key for "reserved MR".
924	 * The driver should allocate memory for it so it can be loaded but no
925	 * ramrod should be passed on it.
926	 */
927	ecore_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
928	if (dev->reserved_lkey != RDMA_RESERVED_LKEY)
929	{
930		DP_NOTICE(p_hwfn, true,
931			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
932		return ECORE_INVAL;
933	}
934
935	return ECORE_SUCCESS;
936}
937
938static enum _ecore_status_t ecore_rdma_setup(struct ecore_hwfn    *p_hwfn,
939				struct ecore_ptt                  *p_ptt,
940				struct ecore_rdma_start_in_params *params)
941{
942	enum _ecore_status_t rc = 0;
943
944	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA setup\n");
945
946	ecore_rdma_init_devinfo(p_hwfn, params);
947	ecore_rdma_init_port(p_hwfn);
948	ecore_rdma_init_events(p_hwfn, params);
949
950	rc = ecore_rdma_reserve_lkey(p_hwfn);
951	if (rc != ECORE_SUCCESS)
952		return rc;
953
954	rc = ecore_rdma_init_hw(p_hwfn, p_ptt);
955	if (rc != ECORE_SUCCESS)
956		return rc;
957
958	if (IS_IWARP(p_hwfn)) {
959		rc = ecore_iwarp_setup(p_hwfn, params);
960		if (rc != ECORE_SUCCESS)
961			return rc;
962	} else {
963		rc = ecore_roce_setup(p_hwfn);
964		if (rc != ECORE_SUCCESS)
965			return rc;
966	}
967
968	return ecore_rdma_start_fw(p_hwfn, p_ptt, params);
969}
970
971enum _ecore_status_t ecore_rdma_stop(void *rdma_cxt)
972{
973	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
974	struct rdma_close_func_ramrod_data *p_ramrod;
975	struct ecore_sp_init_data init_data;
976	struct ecore_spq_entry *p_ent;
977	struct ecore_ptt *p_ptt;
978	u32 ll2_ethertype_en;
979	enum _ecore_status_t rc = ECORE_TIMEOUT;
980
981	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop\n");
982
983	rc = ecore_rdma_deactivate(p_hwfn);
984	if (rc != ECORE_SUCCESS)
985		return rc;
986
987	p_ptt = ecore_ptt_acquire(p_hwfn);
988	if (!p_ptt) {
989		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Failed to acquire PTT\n");
990		return rc;
991	}
992
993#ifdef CONFIG_DCQCN
994	ecore_roce_stop_rl(p_hwfn);
995#endif
996
997	/* Disable RoCE search */
998	ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
999	p_hwfn->b_rdma_enabled_in_prs = false;
1000
1001	ecore_wr(p_hwfn,
1002		 p_ptt,
1003		 PRS_REG_ROCE_DEST_QP_MAX_PF,
1004		 0);
1005
1006	ll2_ethertype_en = ecore_rd(p_hwfn,
1007				    p_ptt,
1008				    PRS_REG_LIGHT_L2_ETHERTYPE_EN);
1009
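	/* Clear bit 0, undoing the (ll2_ethertype_en | 0x01) done in
	 * ecore_rdma_init_hw.
	 */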
1010	ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
1011		 (ll2_ethertype_en & 0xFFFE));
1012
1013#ifndef REAL_ASIC_ONLY
	/* INTERNAL: In CMT mode, re-initialize the NIG to direct packets to both
	 * engines for L2 performance; RoCE requires all traffic to go just to
	 * engine 0.
	 */
1018	if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
1019		DP_ERR(p_hwfn->p_dev,
1020		       "On Everest 4 Big Bear Board revision A0 when RoCE driver is loaded L2 performance is sub-optimal (all traffic is routed to engine 0). For optimal L2 results either remove RoCE driver or use board revision B0\n");
1021
1022		ecore_wr(p_hwfn,
1023			 p_ptt,
1024			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
1025			 0x55555555);
1026		ecore_wr(p_hwfn,
1027			 p_ptt,
1028			 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
1029			 0x55555555);
1030	}
1031#endif
1032
1033	if (IS_IWARP(p_hwfn)) {
1034		rc = ecore_iwarp_stop(p_hwfn);
1035		if (rc != ECORE_SUCCESS) {
1036			ecore_ptt_release(p_hwfn, p_ptt);
1037			return 0;
1038		}
1039	} else {
1040		rc = ecore_roce_stop(p_hwfn);
1041		if (rc != ECORE_SUCCESS) {
1042			ecore_ptt_release(p_hwfn, p_ptt);
1043			return 0;
1044		}
1045	}
1046
1047	ecore_ptt_release(p_hwfn, p_ptt);
1048
1049	/* Get SPQ entry */
1050	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1051	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1052	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1053
1054	/* Stop RoCE */
1055	rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
1056				   p_hwfn->p_rdma_info->proto, &init_data);
1057	if (rc != ECORE_SUCCESS)
1058		goto out;
1059
1060	p_ramrod = &p_ent->ramrod.rdma_close_func;
1061
1062	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
1063	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);
1064
1065	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1066
1067out:
1068	ecore_rdma_free(p_hwfn);
1069
1070	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
1071	return rc;
1072}
1073
1074enum _ecore_status_t ecore_rdma_add_user(void		      *rdma_cxt,
1075			struct ecore_rdma_add_user_out_params *out_params)
1076{
1077	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1078	u32 dpi_start_offset;
1079	u32 returned_id = 0;
1080	enum _ecore_status_t rc;
1081
1082	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding User\n");
1083
1084	/* Allocate DPI */
1085	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1086	rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
1087				      &returned_id);
1088	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1089
1090	if (rc != ECORE_SUCCESS)
1091		DP_NOTICE(p_hwfn, false, "Failed in allocating dpi\n");
1092
1093	out_params->dpi = (u16)returned_id;
1094
1095	/* Calculate the corresponding DPI address */
1096	dpi_start_offset = p_hwfn->dpi_start_offset;
1097
1098	out_params->dpi_addr = (u64)(osal_int_ptr_t)((u8 OSAL_IOMEM*)p_hwfn->doorbells +
1099						     dpi_start_offset +
1100						     ((out_params->dpi) * p_hwfn->dpi_size));
1101
1102	out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset +
1103				    out_params->dpi * p_hwfn->dpi_size;
1104
1105	out_params->dpi_size = p_hwfn->dpi_size;
1106	out_params->wid_count = p_hwfn->wid_count;
1107
1108	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
1109	return rc;
1110}
1111
1112struct ecore_rdma_port *ecore_rdma_query_port(void	*rdma_cxt)
1113{
1114	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1115	struct ecore_rdma_port *p_port = p_hwfn->p_rdma_info->port;
1116	struct ecore_mcp_link_state *p_link_output;
1117
1118	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA Query port\n");
1119
1120	/* The link state is saved only for the leading hwfn */
1121	p_link_output =
1122		&ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
1123
1124	/* Link may have changed... */
1125	p_port->port_state = p_link_output->link_up ? ECORE_RDMA_PORT_UP
1126						    : ECORE_RDMA_PORT_DOWN;
1127
1128	p_port->link_speed = p_link_output->speed;
1129
1130	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
1131
1132	return p_port;
1133}
1134
1135struct ecore_rdma_device *ecore_rdma_query_device(void	*rdma_cxt)
1136{
1137	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1138
1139	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query device\n");
1140
1141	/* Return struct with device parameters */
1142	return p_hwfn->p_rdma_info->dev;
1143}
1144
1145void ecore_rdma_free_tid(void	*rdma_cxt,
1146			 u32	itid)
1147{
1148	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1149
1150	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", itid);
1151
1152	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1153	ecore_bmap_release_id(p_hwfn,
1154			      &p_hwfn->p_rdma_info->tid_map,
1155			      itid);
1156	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1157}
1158
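/* Update the CNQ producer in the USTORM common queue consumer RAM through the
 * GTT BAR0 window. qz_offset is relative to this function's queue zone base,
 * matching the queue_zone_num programmed per CNQ in ecore_rdma_start_fw.
 */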
1159void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
1160{
1161	struct ecore_hwfn *p_hwfn;
1162	u16 qz_num;
1163	u32 addr;
1164
1165	p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1166
1167	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
1168		DP_NOTICE(p_hwfn, false,
1169			  "queue zone offset %d is too large (max is %d)\n",
1170			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
1171		return;
1172	}
1173
1174	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
1175	addr = GTT_BAR0_MAP_REG_USDM_RAM +
1176	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
1177
1178	REG_WR16(p_hwfn, addr, prod);
1179
1180	/* keep prod updates ordered */
1181	OSAL_WMB(p_hwfn->p_dev);
1182}
1183
1184enum _ecore_status_t ecore_rdma_alloc_pd(void	*rdma_cxt,
1185					 u16	*pd)
1186{
1187	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1188	u32                  returned_id;
1189	enum _ecore_status_t rc;
1190
1191	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD\n");
1192
1193	/* Allocates an unused protection domain */
1194	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1195	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
1196				      &p_hwfn->p_rdma_info->pd_map,
1197				      &returned_id);
1198	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1199	if (rc != ECORE_SUCCESS)
1200		DP_NOTICE(p_hwfn, false, "Failed in allocating pd id\n");
1201
1202	*pd = (u16)returned_id;
1203
1204	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
1205	return rc;
1206}
1207
1208void ecore_rdma_free_pd(void	*rdma_cxt,
1209			u16	pd)
1210{
1211	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1212
1213	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "pd = %08x\n", pd);
1214
1215	/* Returns a previously allocated protection domain for reuse */
1216	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1217	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
1218	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1219}
1220
1221enum _ecore_status_t ecore_rdma_alloc_xrcd(void	*rdma_cxt,
1222					   u16	*xrcd_id)
1223{
1224	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1225	u32                  returned_id;
1226	enum _ecore_status_t rc;
1227
1228	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD\n");
1229
1230	/* Allocates an unused XRC domain */
1231	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1232	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
1233				      &p_hwfn->p_rdma_info->xrcd_map,
1234				      &returned_id);
1235	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1236	if (rc != ECORE_SUCCESS)
1237		DP_NOTICE(p_hwfn, false, "Failed in allocating xrcd id\n");
1238
1239	*xrcd_id = (u16)returned_id;
1240
1241	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
1242	return rc;
1243}
1244
1245void ecore_rdma_free_xrcd(void	*rdma_cxt,
1246			  u16	xrcd_id)
1247{
1248	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1249
1250	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
1251
	/* Returns a previously allocated XRC domain for reuse */
1253	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1254	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
1255	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1256}
1257
1258static enum ecore_rdma_toggle_bit
1259ecore_rdma_toggle_bit_create_resize_cq(struct ecore_hwfn *p_hwfn,
1260				       u16 icid)
1261{
1262	struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
1263	enum ecore_rdma_toggle_bit toggle_bit;
1264	u32 bmap_id;
1265
1266	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", icid);
1267
	/* The function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value.
	 */
1271	bmap_id = icid - ecore_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
1272
1273	OSAL_SPIN_LOCK(&p_info->lock);
1274	toggle_bit = !OSAL_TEST_AND_FLIP_BIT(bmap_id, p_info->toggle_bits.bitmap);
1275	OSAL_SPIN_UNLOCK(&p_info->lock);
1276
1277	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_RDMA_TOGGLE_BIT_= %d\n",
1278		   toggle_bit);
1279
1280	return toggle_bit;
1281}
1282
1283enum _ecore_status_t ecore_rdma_create_cq(void			      *rdma_cxt,
1284				struct ecore_rdma_create_cq_in_params *params,
1285				u16                                   *icid)
1286{
1287	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1288	struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
1289	struct rdma_create_cq_ramrod_data	*p_ramrod;
1290	enum ecore_rdma_toggle_bit		toggle_bit;
1291	struct ecore_sp_init_data		init_data;
1292	struct ecore_spq_entry			*p_ent;
1293	enum _ecore_status_t			rc;
1294	u32					returned_id;
1295
1296	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cq_handle = %08x%08x\n",
1297		   params->cq_handle_hi, params->cq_handle_lo);
1298
1299	/* Allocate icid */
1300	OSAL_SPIN_LOCK(&p_info->lock);
1301	rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
1302	OSAL_SPIN_UNLOCK(&p_info->lock);
1303
1304	if (rc != ECORE_SUCCESS)
1305	{
1306		DP_NOTICE(p_hwfn, false, "Can't create CQ, rc = %d\n", rc);
1307		return rc;
1308	}
1309
1310	*icid = (u16)(returned_id +
1311		      ecore_cxt_get_proto_cid_start(
1312			      p_hwfn, p_info->proto));
1313
1314	/* Check if icid requires a page allocation */
1315	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *icid);
1316	if (rc != ECORE_SUCCESS)
1317		goto err;
1318
1319	/* Get SPQ entry */
1320	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1321	init_data.cid = *icid;
1322	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1323	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1324
1325	/* Send create CQ ramrod */
1326	rc = ecore_sp_init_request(p_hwfn, &p_ent,
1327				   RDMA_RAMROD_CREATE_CQ,
1328				   p_info->proto, &init_data);
1329	if (rc != ECORE_SUCCESS)
1330		goto err;
1331
1332	p_ramrod = &p_ent->ramrod.rdma_create_cq;
1333
1334	p_ramrod->cq_handle.hi = OSAL_CPU_TO_LE32(params->cq_handle_hi);
1335	p_ramrod->cq_handle.lo = OSAL_CPU_TO_LE32(params->cq_handle_lo);
1336	p_ramrod->dpi = OSAL_CPU_TO_LE16(params->dpi);
1337	p_ramrod->is_two_level_pbl = params->pbl_two_level;
1338	p_ramrod->max_cqes = OSAL_CPU_TO_LE32(params->cq_size);
1339	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
1340	p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(params->pbl_num_pages);
1341	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM)
1342			+ params->cnq_id;
1343	p_ramrod->int_timeout = params->int_timeout;
1344	/* INTERNAL: Two layer PBL is currently not supported, ignoring next line */
1345	/* INTERNAL: p_ramrod->pbl_log_page_size = params->pbl_page_size_log - 12; */
1346
1347	/* toggle the bit for every resize or create cq for a given icid */
1348	toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1349
1350	p_ramrod->toggle_bit = toggle_bit;
1351
1352	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1353	if (rc != ECORE_SUCCESS) {
1354		/* restore toggle bit */
1355		ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1356		goto err;
1357	}
1358
1359	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Created CQ, rc = %d\n", rc);
1360	return rc;
1361
1362err:
1363	/* release allocated icid */
1364	OSAL_SPIN_LOCK(&p_info->lock);
1365	ecore_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
1366	OSAL_SPIN_UNLOCK(&p_info->lock);
1367
1368	DP_NOTICE(p_hwfn, false, "Create CQ failed, rc = %d\n", rc);
1369
1370	return rc;
1371}
1372
1373enum _ecore_status_t ecore_rdma_destroy_cq(void			*rdma_cxt,
1374			struct ecore_rdma_destroy_cq_in_params	*in_params,
1375			struct ecore_rdma_destroy_cq_out_params	*out_params)
1376{
1377	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1378	struct rdma_destroy_cq_output_params *p_ramrod_res;
1379	struct rdma_destroy_cq_ramrod_data	*p_ramrod;
1380	struct ecore_sp_init_data		init_data;
1381	struct ecore_spq_entry			*p_ent;
1382	dma_addr_t				ramrod_res_phys;
1383	enum _ecore_status_t			rc = ECORE_NOMEM;
1384
1385	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);
1386
1387	p_ramrod_res = (struct rdma_destroy_cq_output_params *)
1388			OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
1389				sizeof(struct rdma_destroy_cq_output_params));
1390	if (!p_ramrod_res)
1391	{
1392		DP_NOTICE(p_hwfn, false,
1393			  "ecore destroy cq failed: cannot allocate memory (ramrod)\n");
1394		return rc;
1395	}
1396
1397	/* Get SPQ entry */
1398	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1399	init_data.cid =  in_params->icid;
1400	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1401	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1402
1403	/* Send destroy CQ ramrod */
1404	rc = ecore_sp_init_request(p_hwfn, &p_ent,
1405				   RDMA_RAMROD_DESTROY_CQ,
1406				   p_hwfn->p_rdma_info->proto, &init_data);
1407	if (rc != ECORE_SUCCESS)
1408		goto err;
1409
1410	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1411	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1412
1413	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1414	if (rc != ECORE_SUCCESS)
1415		goto err;
1416
1417	out_params->num_cq_notif =
1418		OSAL_LE16_TO_CPU(p_ramrod_res->cnq_num);
1419
1420	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
1421			       sizeof(struct rdma_destroy_cq_output_params));
1422
1423	/* Free icid */
1424	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1425
1426	ecore_bmap_release_id(p_hwfn,
1427			      &p_hwfn->p_rdma_info->cq_map,
1428		(in_params->icid - ecore_cxt_get_proto_cid_start(
1429			p_hwfn, p_hwfn->p_rdma_info->proto)));
1430
1431	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1432
1433	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1434	return rc;
1435
1436err:
1437	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
1438			       sizeof(struct rdma_destroy_cq_output_params));
1439
1440	return rc;
1441}
1442
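/* Pack a 6-byte MAC address into three 16-bit words in the byte order the FW
 * expects: each word holds two consecutive MAC bytes, high byte first, and is
 * then converted to little-endian.
 */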
1443void ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac)
1444{
1445	p_fw_mac[0] = OSAL_CPU_TO_LE16((p_ecore_mac[0] << 8) + p_ecore_mac[1]);
1446	p_fw_mac[1] = OSAL_CPU_TO_LE16((p_ecore_mac[2] << 8) + p_ecore_mac[3]);
1447	p_fw_mac[2] = OSAL_CPU_TO_LE16((p_ecore_mac[4] << 8) + p_ecore_mac[5]);
1448}
1449
1450enum _ecore_status_t ecore_rdma_query_qp(void			*rdma_cxt,
1451			struct ecore_rdma_qp			*qp,
1452			struct ecore_rdma_query_qp_out_params	*out_params)
1453
1454{
1455	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1456	enum _ecore_status_t rc = ECORE_SUCCESS;
1457
1458	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
1459
	/* The following fields are filled in from the qp struct and not from the
	 * FW, as they cannot be modified by the FW.
	 */
1463	out_params->mtu = qp->mtu;
1464	out_params->dest_qp = qp->dest_qp;
1465	out_params->incoming_atomic_en = qp->incoming_atomic_en;
1466	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
1467	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
1468	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
1469	out_params->dgid = qp->dgid;
1470	out_params->flow_label = qp->flow_label;
1471	out_params->hop_limit_ttl = qp->hop_limit_ttl;
1472	out_params->traffic_class_tos = qp->traffic_class_tos;
1473	out_params->timeout = qp->ack_timeout;
1474	out_params->rnr_retry = qp->rnr_retry_cnt;
1475	out_params->retry_cnt = qp->retry_cnt;
1476	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
1477	out_params->pkey_index = 0;
1478	out_params->max_rd_atomic = qp->max_rd_atomic_req;
1479	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
1480	out_params->sqd_async = qp->sqd_async;
1481
1482	if (IS_IWARP(p_hwfn))
1483		rc = ecore_iwarp_query_qp(qp, out_params);
1484	else
1485		rc = ecore_roce_query_qp(p_hwfn, qp, out_params);
1486
1487	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query QP, rc = %d\n", rc);
1488	return rc;
1489}
1490
1491enum _ecore_status_t ecore_rdma_destroy_qp(void *rdma_cxt,
1492					   struct ecore_rdma_qp *qp,
1493					   struct ecore_rdma_destroy_qp_out_params *out_params)
1494{
1495	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1496	enum _ecore_status_t rc = ECORE_SUCCESS;
1497
1498	if (!rdma_cxt || !qp) {
1499		DP_ERR(p_hwfn,
1500		       "ecore rdma destroy qp failed: invalid NULL input. rdma_cxt=%p, qp=%p\n",
1501		       rdma_cxt, qp);
1502		return ECORE_INVAL;
1503	}
1504
1505	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)\n", qp->icid);
1506
1507	if (IS_IWARP(p_hwfn))
1508		rc = ecore_iwarp_destroy_qp(p_hwfn, qp);
1509	else
1510		rc = ecore_roce_destroy_qp(p_hwfn, qp, out_params);
1511
1512	/* free qp params struct */
1513	OSAL_FREE(p_hwfn->p_dev, qp);
1514
1515	return rc;
1516}
1517
1518struct ecore_rdma_qp *ecore_rdma_create_qp(void			*rdma_cxt,
1519			struct ecore_rdma_create_qp_in_params	*in_params,
1520			struct ecore_rdma_create_qp_out_params	*out_params)
1521{
1522	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1523	struct ecore_rdma_qp *qp;
1524	u8 max_stats_queues;
1525	enum _ecore_status_t rc = 0;
1526
1527	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
1528		DP_ERR(p_hwfn->p_dev,
		       "ecore roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
1530		       rdma_cxt,
1531		       in_params,
1532		       out_params);
1533		return OSAL_NULL;
1534	}
1535
1536	/* Some sanity checks... */
1537	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
1538	if (in_params->stats_queue >= max_stats_queues) {
1539		DP_ERR(p_hwfn->p_dev,
1540		       "ecore rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
1541		       in_params->stats_queue, max_stats_queues);
1542		return OSAL_NULL;
1543	}
1544
1545	if (IS_IWARP(p_hwfn)) {
1546		if (in_params->sq_num_pages*sizeof(struct regpair) >
1547		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
1548			DP_NOTICE(p_hwfn->p_dev, true, "Sq num pages: %d exceeds maximum\n",
1549				  in_params->sq_num_pages);
1550			return OSAL_NULL;
1551		}
1552		if (in_params->rq_num_pages*sizeof(struct regpair) >
1553		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
1554			DP_NOTICE(p_hwfn->p_dev, true,
1555				  "Rq num pages: %d exceeds maximum\n",
1556				  in_params->rq_num_pages);
1557			return OSAL_NULL;
1558		}
1559	}
1560
1561	qp = OSAL_ZALLOC(p_hwfn->p_dev,
1562			 GFP_KERNEL,
1563			 sizeof(struct ecore_rdma_qp));
1564	if (!qp)
1565	{
1566		DP_NOTICE(p_hwfn, false, "Failed to allocate ecore_rdma_qp\n");
1567		return OSAL_NULL;
1568	}
1569
1570	qp->cur_state = ECORE_ROCE_QP_STATE_RESET;
1571#ifdef CONFIG_ECORE_IWARP
1572	qp->iwarp_state = ECORE_IWARP_QP_STATE_IDLE;
1573#endif
1574	qp->qp_handle.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_hi);
1575	qp->qp_handle.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_lo);
1576	qp->qp_handle_async.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_async_hi);
1577	qp->qp_handle_async.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_async_lo);
1578	qp->use_srq = in_params->use_srq;
1579	qp->signal_all = in_params->signal_all;
1580	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
1581	qp->pd = in_params->pd;
1582	qp->dpi = in_params->dpi;
1583	qp->sq_cq_id = in_params->sq_cq_id;
1584	qp->sq_num_pages = in_params->sq_num_pages;
1585	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
1586	qp->rq_cq_id = in_params->rq_cq_id;
1587	qp->rq_num_pages = in_params->rq_num_pages;
1588	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
1589	qp->srq_id = in_params->srq_id;
1590	qp->req_offloaded = false;
1591	qp->resp_offloaded = false;
	/* e2e_flow_control cannot be done in the case of an SRQ.
	 * Refer to section 9.7.7.2 (End-to-End Flow Control) of the IB spec.
	 */
1595	qp->e2e_flow_control_en = qp->use_srq ? false : true;
1596	qp->stats_queue = in_params->stats_queue;
1597	qp->qp_type = in_params->qp_type;
1598	qp->xrcd_id = in_params->xrcd_id;
1599
1600	if (IS_IWARP(p_hwfn)) {
1601		rc = ecore_iwarp_create_qp(p_hwfn, qp, out_params);
1602		qp->qpid = qp->icid;
1603	} else {
1604		rc = ecore_roce_alloc_qp_idx(p_hwfn, &qp->qp_idx);
1605		qp->icid = ECORE_ROCE_QP_TO_ICID(qp->qp_idx);
1606		qp->qpid = ((0xFF << 16) | qp->icid);
1607	}
1608
1609	if (rc != ECORE_SUCCESS) {
1610		OSAL_FREE(p_hwfn->p_dev, qp);
1611		return OSAL_NULL;
1612	}
1613
1614	out_params->icid = qp->icid;
1615	out_params->qp_id = qp->qpid;
1616
1617	/* INTERNAL: max_sq_sges future use only*/
1618
1619	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Create QP, rc = %d\n", rc);
1620	return qp;
1621}
1622
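/* Field layouts used by SET_FIELD below when applying the global overrides:
 * ECN occupies bits 0-1 and DSCP bits 2-7 of traffic_class_tos, while the
 * VLAN priority (PCP) occupies bits 13-15 of vlan_id.
 */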
1623#define ECORE_RDMA_ECN_SHIFT 0
1624#define ECORE_RDMA_ECN_MASK 0x3
1625#define ECORE_RDMA_DSCP_SHIFT 2
1626#define ECORE_RDMA_DSCP_MASK 0x3f
1627#define ECORE_RDMA_VLAN_PRIO_SHIFT 13
#define ECORE_RDMA_VLAN_PRIO_MASK 0x7
enum _ecore_status_t ecore_rdma_modify_qp(
	void *rdma_cxt,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_modify_qp_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	enum ecore_roce_qp_state prev_state;
	enum _ecore_status_t     rc = ECORE_SUCCESS;

	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN))
	{
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE))
	{
		qp->roce_mode = params->roce_mode;
	}
	if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY))
	{
		qp->pkey = params->pkey;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
	{
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_DEST_QP))
	{
		qp->dest_qp = params->dest_qp;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR))
	{
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = params->udp_src_port;
		qp->vlan_id = params->vlan_id;
		qp->traffic_class_tos = params->traffic_class_tos;

		/* apply global override values */
		if (p_hwfn->p_rdma_info->glob_cfg.vlan_pri_en)
			SET_FIELD(qp->vlan_id, ECORE_RDMA_VLAN_PRIO,
				  p_hwfn->p_rdma_info->glob_cfg.vlan_pri);

		if (p_hwfn->p_rdma_info->glob_cfg.ecn_en)
			SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_ECN,
				  p_hwfn->p_rdma_info->glob_cfg.ecn);

		if (p_hwfn->p_rdma_info->glob_cfg.dscp_en)
			SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_DSCP,
				  p_hwfn->p_rdma_info->glob_cfg.dscp);

		qp->mtu = params->mtu;

		OSAL_MEMCPY((u8 *)&qp->remote_mac_addr[0],
			    (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
				    (u8 *)&params->local_mac_addr[0],
				    ETH_ALEN);
		} else {
			OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
				    (u8 *)&p_hwfn->hw_info.hw_mac_addr,
				    ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN))
	{
		qp->rq_psn = params->rq_psn;
	}
	if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN))
	{
		qp->sq_psn = params->sq_psn;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
	{
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
	{
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
	{
		qp->ack_timeout = params->ack_timeout;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT))
	{
		qp->retry_cnt = params->retry_cnt;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
	{
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	}
	if (GET_FIELD(params->modify_flags,
		      ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
	{
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
	}

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE))
	{
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI) {
		qp->has_req = 1;
	} else if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT) {
		qp->has_resp = 1;
	} else {
		qp->has_req = 1;
		qp->has_resp = 1;
	}

	if (IS_IWARP(p_hwfn)) {
		enum ecore_iwarp_qp_state new_state =
			ecore_roce2iwarp_state(qp->cur_state);

		rc = ecore_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
	} else {
		rc = ecore_roce_modify_qp(p_hwfn, qp, prev_state, params);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}

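/* Register a memory region (TID) with the FW: build a REGISTER_MR ramrod
 * carrying the access rights, page-size and PBL layout, TID type and the
 * VA (or FBO for a zero-based MR), post it in EBLOCK mode and check the
 * FW return code.
 */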
enum _ecore_status_t ecore_rdma_register_tid(void		 *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct ecore_sp_init_data	     init_data;
	struct ecore_spq_entry               *p_ent;
	enum rdma_tid_type                   tid_type;
	u8                                   fw_return_code;
	enum _ecore_status_t                 rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid) {
		p_hwfn->p_rdma_info->last_tid = params->itid;
	}

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
		  params->zbva);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR,
		  params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == ECORE_RDMA_TID_FMR) &&
	    !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
		  params->local_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
		  params->dma_mr);

	switch (params->tid_type)
	{
	case ECORE_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case ECORE_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case ECORE_RDMA_TID_MW_TYPE1:
		tid_type = RDMA_TID_MW_TYPE1;
		break;
	case ECORE_RDMA_TID_MW_TYPE2A:
		tid_type = RDMA_TID_MW_TYPE2A;
		break;
	default:
		rc = ECORE_INVAL;
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
		  tid_type);

	p_ramrod->itid = OSAL_CPU_TO_LE32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = OSAL_CPU_TO_LE16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva)
	{
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = OSAL_CPU_TO_LE32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, true, "fw_return_code = %d\n", fw_return_code);
		return ECORE_UNKNOWN_ERROR;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

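/* Helper that posts a single DEREGISTER_MR ramrod for 'itid' and returns
 * the FW completion code through 'fw_return_code'.
 */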
static OSAL_INLINE int ecore_rdma_send_deregister_tid_ramrod(
		struct ecore_hwfn *p_hwfn,
		u32 itid,
		u8 *fw_return_code)
{
	struct ecore_sp_init_data              init_data;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct ecore_spq_entry                 *p_ent;
	enum _ecore_status_t                   rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_DEREGISTER_MR,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = OSAL_CPU_TO_LE32(itid);

	rc = ecore_spq_post(p_hwfn, p_ent, fw_return_code);
	if (rc != ECORE_SUCCESS)
	{
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	return rc;
}

#define ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC	(1)

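/* Deregister a TID. The FW may answer RDMA_RETURN_NIG_DRAIN_REQ if the
 * device still holds in-flight data for the MR; in that case the ramrod is
 * retried after a short delay and, on the last attempt, after requesting a
 * NIG drain from the MCP. Any other FW code is mapped to a driver status
 * below.
 */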
enum _ecore_status_t ecore_rdma_deregister_tid(void	*rdma_cxt,
					       u32	itid)
{
	enum _ecore_status_t                   rc;
	u8                                     fw_ret_code;
	struct ecore_ptt                       *p_ptt;
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	/* First attempt */
	rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
		goto done;

	/* Second attempt, after 1msec, if device still holds data.
	 * This can occur since 'destroy QP' returns to the caller rather fast.
	 * The synchronous part of it returns after freeing a few of the
	 * resources but not all of them, allowing the consumer to continue its
	 * flow. All of the resources will be freed after the asynchronous part
	 * of the destroy QP is complete.
	 */
	OSAL_MSLEEP(ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC);
	rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
		goto done;

	/* Third and last attempt, perform NIG drain and resend the ramrod */
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_TIMEOUT;

	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		ecore_ptt_release(p_hwfn, p_ptt);
		return rc;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
	if (rc != ECORE_SUCCESS)
		return rc;

done:
	if (fw_ret_code == RDMA_RETURN_OK) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "De-registered itid=%d\n",
			   itid);
		return ECORE_SUCCESS;
	} else if (fw_ret_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		/* INTERNAL: This error is returned when trying to deregister
		 * an MR that is not allocated. We define "allocated" as either:
		 * 1. Registered.
		 * 2. This is an FMR MR type, which is not currently registered
		 *    but can accept FMR WQEs on SQ.
		 */
		DP_NOTICE(p_hwfn, false, "itid=%d, fw_ret_code=%d\n", itid,
			  fw_ret_code);
		return ECORE_INVAL;
	} else { /* fw_ret_code == RDMA_RETURN_NIG_DRAIN_REQ */
		DP_NOTICE(p_hwfn, true,
			  "deregister failed after three attempts. itid=%d, fw_ret_code=%d\n",
			  itid, fw_ret_code);
		return ECORE_UNKNOWN_ERROR;
	}
}

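/* XRC SRQs and regular SRQs are tracked in separate bitmaps. Regular SRQ
 * ids are offset by srq_id_offset when handed to the FW, while XRC SRQ ids
 * are used as-is.
 */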
static struct ecore_bmap *ecore_rdma_get_srq_bmap(struct ecore_hwfn *p_hwfn, bool is_xrc)
{
	if (is_xrc)
		return &p_hwfn->p_rdma_info->xrc_srq_map;

	return &p_hwfn->p_rdma_info->srq_map;
}

u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc)
{
	if (is_xrc)
		return id;

	return id + p_hwfn->p_rdma_info->srq_id_offset;
}

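/* Update the wqe_limit of an existing SRQ/XRC-SRQ by posting a MODIFY_SRQ
 * ramrod with the FW-visible SRQ id.
 */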
enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
		      struct ecore_rdma_modify_srq_in_params *in_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_srq_modify_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u16 opaque_fid, fw_srq_id;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Send modify SRQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_MODIFY_SRQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_modify_srq;

	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
					     in_params->is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
	opaque_fid = p_hwfn->hw_info.opaque_fid;
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
	p_ramrod->wqe_limit = OSAL_CPU_TO_LE16(in_params->wqe_limit);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}

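/* Destroy an SRQ/XRC-SRQ: post the DESTROY_SRQ ramrod and, once it
 * completes, return the id to the matching bitmap.
 */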
enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
		       struct ecore_rdma_destroy_srq_in_params *in_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_srq_destroy_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u16 opaque_fid, fw_srq_id;
	struct ecore_bmap *bmap;
	enum _ecore_status_t rc;

	opaque_fid = p_hwfn->hw_info.opaque_fid;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Send destroy SRQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_DESTROY_SRQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_destroy_srq;

	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
					     in_params->is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}

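/* Create an SRQ/XRC-SRQ: allocate an id from the matching bitmap, make sure
 * an ILT line exists for it, then post the CREATE_SRQ ramrod with the PBL
 * and producers-pair addresses. XRC SRQs additionally carry the CQ cid, the
 * XRC domain and the reserved-key enable flag. On failure the id is released
 * back to the bitmap.
 */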
enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
		      struct ecore_rdma_create_srq_in_params *in_params,
		      struct ecore_rdma_create_srq_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct rdma_srq_create_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	enum ecore_cxt_elem_type elem_type;
	struct ecore_spq_entry *p_ent;
	u16 opaque_fid, fw_srq_id;
	struct ecore_bmap *bmap;
	u32 returned_id;
	enum _ecore_status_t rc;

	/* Allocate XRC/SRQ ID */
	bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
			  in_params->is_xrc);
		return rc;
	}

	/* Allocate XRC/SRQ ILT page */
	elem_type = (in_params->is_xrc) ? (ECORE_ELEM_XRC_SRQ) : (ECORE_ELEM_SRQ);
	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
	if (rc != ECORE_SUCCESS)
		goto err;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	/* Create XRC/SRQ ramrod */
	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_CREATE_SRQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_srq;

	p_ramrod->pbl_base_addr.hi = DMA_HI_LE(in_params->pbl_base_addr);
	p_ramrod->pbl_base_addr.lo = DMA_LO_LE(in_params->pbl_base_addr);
	p_ramrod->pages_in_srq_pbl = OSAL_CPU_TO_LE16(in_params->num_pages);
	p_ramrod->pd_id = OSAL_CPU_TO_LE16(in_params->pd_id);
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
	p_ramrod->page_size = OSAL_CPU_TO_LE16(in_params->page_size);
	p_ramrod->producers_addr.hi = DMA_HI_LE(in_params->prod_pair_addr);
	p_ramrod->producers_addr.lo = DMA_LO_LE(in_params->prod_pair_addr);
	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, (u16)returned_id,
					     in_params->is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);

	if (in_params->is_xrc) {
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG,
			  1);
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
			  in_params->reserved_key_en);
		p_ramrod->xrc_srq_cq_cid = OSAL_CPU_TO_LE32(in_params->cq_cid);
		p_ramrod->xrc_domain = OSAL_CPU_TO_LE16(in_params->xrcd_id);
	}

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	out_params->srq_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "XRC/SRQ created Id = %x (is_xrc=%u)\n",
		   out_params->srq_id, in_params->is_xrc);
	return rc;

err:
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, bmap, returned_id);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);

	return rc;
}

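/* Report whether any QP ids are currently set in the QP bitmap; returns
 * false when rdma_info or the bitmap was never allocated.
 */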
bool ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn)
{
	bool result;

	/* if rdma info has not been allocated, naturally there are no QPs */
	if (!p_hwfn->p_rdma_info)
		return false;

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	if (!p_hwfn->p_rdma_info->qp_map.bitmap)
		result = false;
	else
		result = !ecore_bmap_is_empty(&p_hwfn->p_rdma_info->qp_map);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
	return result;
}

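/* Resize a CQ: allocate a DMA buffer for the ramrod output, post a
 * RESIZE_CQ ramrod (toggling the per-icid create/resize bit) and return the
 * old producer/consumer values reported by the FW.
 */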
enum _ecore_status_t ecore_rdma_resize_cq(void			*rdma_cxt,
			struct ecore_rdma_resize_cq_in_params	*in_params,
			struct ecore_rdma_resize_cq_out_params	*out_params)
{
	enum _ecore_status_t			rc;
	enum ecore_rdma_toggle_bit		toggle_bit;
	struct ecore_spq_entry			*p_ent;
	struct rdma_resize_cq_ramrod_data	*p_ramrod;
	u8                                      fw_return_code;
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	dma_addr_t				ramrod_res_phys;
	struct rdma_resize_cq_output_params	*p_ramrod_res;
	struct ecore_sp_init_data		init_data;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);

	/* Send resize CQ ramrod */
	p_ramrod_res = (struct rdma_resize_cq_output_params *)
			OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
				sizeof(*p_ramrod_res));
	if (!p_ramrod_res)
	{
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore resize cq failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   RDMA_RAMROD_RESIZE_CQ,
				   p_hwfn->p_rdma_info->proto, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_resize_cq;

	p_ramrod->flags = 0;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn,
							    in_params->icid);

	SET_FIELD(p_ramrod->flags,
		  RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT,
		  toggle_bit);

	SET_FIELD(p_ramrod->flags,
		  RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
		  in_params->pbl_two_level);

	p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
	p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(in_params->pbl_num_pages);
	p_ramrod->max_cqes = OSAL_CPU_TO_LE32(in_params->cq_size);
	p_ramrod->pbl_addr.hi = DMA_HI_LE(in_params->pbl_ptr);
	p_ramrod->pbl_addr.lo = DMA_LO_LE(in_params->pbl_ptr);

	p_ramrod->output_params_addr.hi = DMA_HI_LE(ramrod_res_phys);
	p_ramrod->output_params_addr.lo = DMA_LO_LE(ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc != ECORE_SUCCESS)
		goto err;

	if (fw_return_code != RDMA_RETURN_OK)
	{
		DP_NOTICE(p_hwfn, fw_return_code != RDMA_RETURN_RESIZE_CQ_ERR,
			  "fw_return_code = %d\n", fw_return_code);
		rc = ECORE_UNKNOWN_ERROR;
		goto err;
	}

	out_params->prod = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_prod);
	out_params->cons = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_cons);

	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);

	return rc;

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));
	DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);

	return rc;
}

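/* Bring up RDMA on this hwfn: allocate the RDMA info structures, program
 * the HW/FW through ecore_rdma_setup() and mark the function active.
 */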
enum _ecore_status_t ecore_rdma_start(void *rdma_cxt,
				struct ecore_rdma_start_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_TIMEOUT;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		goto err;

	rc = ecore_rdma_alloc(p_hwfn);
	if (rc)
		goto err1;

	rc = ecore_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	ecore_ptt_release(p_hwfn, p_ptt);

	ecore_rdma_activate(p_hwfn);
	return rc;

err2:
	ecore_rdma_free(p_hwfn);
err1:
	ecore_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}

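/* Collect RDMA statistics for 'stats_queue': per-queue sent/received
 * counters from the PSTORM/TSTORM RAM plus per-PF RoCE event and DCQCN
 * counters (and iWARP retransmit counters when compiled in), converted to
 * CPU byte order into 'out_params'.
 */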
enum _ecore_status_t ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
				struct ecore_rdma_stats_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	u8 abs_stats_queue, max_stats_queues;
	u32 pstats_addr, tstats_addr, addr;
	struct ecore_rdma_info *info;
	struct ecore_ptt *p_ptt;
#ifdef CONFIG_ECORE_IWARP
	u32 xstats_addr;
#endif
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_hwfn->p_rdma_info) {
		DP_INFO(p_hwfn->p_dev, "ecore rdma query stats failed due to NULL rdma_info\n");
		return ECORE_INVAL;
	}

	info = p_hwfn->p_rdma_info;

	rc = ecore_rdma_inc_ref_cnt(p_hwfn);
	if (rc != ECORE_SUCCESS)
		return rc;

	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->p_dev,
		       "ecore rdma query stats failed due to invalid statistics queue %d. maximum is %d\n",
		       stats_queue, max_stats_queues);
		rc = ECORE_INVAL;
		goto err;
	}

	/* Statistics collected in statistics queues (for PF/VF) */
	abs_stats_queue = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
			  stats_queue;
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      PSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      TSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);

#ifdef CONFIG_ECORE_IWARP
	/* Statistics per PF ID */
	xstats_addr = BAR0_MAP_REG_XSDM_RAM +
		      XSTORM_IWARP_RXMIT_STATS_OFFSET(p_hwfn->rel_pf_id);
#endif

	OSAL_MEMSET(&info->rdma_sent_pstats, 0, sizeof(info->rdma_sent_pstats));
	OSAL_MEMSET(&info->rdma_rcv_tstats, 0, sizeof(info->rdma_rcv_tstats));
	OSAL_MEMSET(&info->roce.event_stats, 0, sizeof(info->roce.event_stats));
	OSAL_MEMSET(&info->roce.dcqcn_rx_stats, 0, sizeof(info->roce.dcqcn_rx_stats));
	OSAL_MEMSET(&info->roce.dcqcn_tx_stats, 0, sizeof(info->roce.dcqcn_tx_stats));
#ifdef CONFIG_ECORE_IWARP
	OSAL_MEMSET(&info->iwarp.stats, 0, sizeof(info->iwarp.stats));
#endif

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		rc = ECORE_TIMEOUT;
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
		goto err;
	}

	ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_sent_pstats,
			  pstats_addr, sizeof(struct rdma_sent_stats));

	ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_rcv_tstats,
			  tstats_addr, sizeof(struct rdma_rcv_stats));

	addr = BAR0_MAP_REG_TSDM_RAM +
	       TSTORM_ROCE_EVENTS_STAT_OFFSET(p_hwfn->rel_pf_id);
	ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.event_stats, addr,
			  sizeof(struct roce_events_stats));

	addr = BAR0_MAP_REG_YSDM_RAM +
	       YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(p_hwfn->rel_pf_id);
	ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_rx_stats, addr,
			  sizeof(struct roce_dcqcn_received_stats));

	addr = BAR0_MAP_REG_PSDM_RAM +
	       PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(p_hwfn->rel_pf_id);
	ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_tx_stats, addr,
			  sizeof(struct roce_dcqcn_sent_stats));

#ifdef CONFIG_ECORE_IWARP
	ecore_memcpy_from(p_hwfn, p_ptt, &info->iwarp.stats,
			  xstats_addr, sizeof(struct iwarp_rxmit_stats_drv));
#endif

	ecore_ptt_release(p_hwfn, p_ptt);

	OSAL_MEMSET(out_params, 0, sizeof(*out_params));

	out_params->sent_bytes =
		HILO_64_REGPAIR(info->rdma_sent_pstats.sent_bytes);
	out_params->sent_pkts =
		HILO_64_REGPAIR(info->rdma_sent_pstats.sent_pkts);
	out_params->rcv_bytes =
		HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_bytes);
	out_params->rcv_pkts =
		HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_pkts);

	out_params->silent_drops =
		OSAL_LE16_TO_CPU(info->roce.event_stats.silent_drops);
	out_params->rnr_nacks_sent =
		OSAL_LE16_TO_CPU(info->roce.event_stats.rnr_naks_sent);
	out_params->icrc_errors =
		OSAL_LE32_TO_CPU(info->roce.event_stats.icrc_error_count);
	out_params->retransmit_events =
		OSAL_LE32_TO_CPU(info->roce.event_stats.retransmit_count);
	out_params->ecn_pkt_rcv =
		HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.ecn_pkt_rcv);
	out_params->cnp_pkt_rcv =
		HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.cnp_pkt_rcv);
	out_params->cnp_pkt_sent =
		HILO_64_REGPAIR(info->roce.dcqcn_tx_stats.cnp_pkt_sent);

#ifdef CONFIG_ECORE_IWARP
	out_params->iwarp_tx_fast_rxmit_cnt =
		HILO_64_REGPAIR(info->iwarp.stats.tx_fast_retransmit_event_cnt);
	out_params->iwarp_tx_slow_start_cnt =
		HILO_64_REGPAIR(
			info->iwarp.stats.tx_go_to_slow_start_event_cnt);
	out_params->unalign_rx_comp = info->iwarp.unalign_rx_comp;
#endif

err:
	ecore_rdma_dec_ref_cnt(p_hwfn);

	return rc;
}

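/* Report resource usage: for every resource bitmap (PD, DPI, CQ, QP, TID,
 * SRQ, XRC-SRQ, XRCD) return the number of allocated ids and the maximum.
 */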
enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	unsigned long *bitmap;
	unsigned int nbits;

	if (!p_hwfn->p_rdma_info)
		return ECORE_INVAL;

	OSAL_MEMSET(out_params, 0, sizeof(*out_params));

	bitmap = p_hwfn->p_rdma_info->pd_map.bitmap;
	nbits = p_hwfn->p_rdma_info->pd_map.max_count;
	out_params->pd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_pd = nbits;

	bitmap = p_hwfn->p_rdma_info->dpi_map.bitmap;
	nbits = p_hwfn->p_rdma_info->dpi_map.max_count;
	out_params->dpi_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_dpi = nbits;

	bitmap = p_hwfn->p_rdma_info->cq_map.bitmap;
	nbits = p_hwfn->p_rdma_info->cq_map.max_count;
	out_params->cq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_cq = nbits;

	bitmap = p_hwfn->p_rdma_info->qp_map.bitmap;
	nbits = p_hwfn->p_rdma_info->qp_map.max_count;
	out_params->qp_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_qp = nbits;

	bitmap = p_hwfn->p_rdma_info->tid_map.bitmap;
	nbits = p_hwfn->p_rdma_info->tid_map.max_count;
	out_params->tid_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_tid = nbits;

	bitmap = p_hwfn->p_rdma_info->srq_map.bitmap;
	nbits = p_hwfn->p_rdma_info->srq_map.max_count;
	out_params->srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_srq = nbits;

	bitmap = p_hwfn->p_rdma_info->xrc_srq_map.bitmap;
	nbits = p_hwfn->p_rdma_info->xrc_srq_map.max_count;
	out_params->xrc_srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_xrc_srq = nbits;

	bitmap = p_hwfn->p_rdma_info->xrcd_map.bitmap;
	nbits = p_hwfn->p_rdma_info->xrcd_map.max_count;
	out_params->xrcd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
	out_params->max_xrcd = nbits;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_rdma_resize_cnq(void			      *rdma_cxt,
				struct ecore_rdma_resize_cnq_in_params *params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cnq_id = %08x\n", params->cnq_id);

	/* @@@TBD: waiting for fw (there is no ramrod yet) */
	return ECORE_NOTIMPL;
}

void ecore_rdma_remove_user(void	*rdma_cxt,
			    u16		dpi)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "dpi = %08x\n", dpi);

	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}

#ifndef LINUX_REMOVE
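/* Apply global RDMA overrides (DSCP, ECN, VLAN priority and their enable
 * bits). Only the fields selected by 'glob_cfg_bits' are validated and
 * copied into glob_cfg; the stored values are later folded into the QP's
 * address vector in ecore_rdma_modify_qp().
 */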
enum _ecore_status_t
ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *in_params,
			u32 glob_cfg_bits)
{
	struct ecore_rdma_glob_cfg glob_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_RDMA,
		   "dscp %d dscp en %d ecn %d ecn en %d vlan pri %d vlan_pri_en %d\n",
		   in_params->dscp, in_params->dscp_en,
		   in_params->ecn, in_params->ecn_en, in_params->vlan_pri,
		   in_params->vlan_pri_en);

	/* Read global cfg to local */
	OSAL_MEMCPY(&glob_cfg, &p_hwfn->p_rdma_info->glob_cfg,
		    sizeof(glob_cfg));

	if (glob_cfg_bits & ECORE_RDMA_DCSP_BIT_MASK) {
		if (in_params->dscp > MAX_DSCP) {
			DP_ERR(p_hwfn->p_dev, "invalid glob dscp %d\n",
			       in_params->dscp);
			return ECORE_INVAL;
		}
		glob_cfg.dscp = in_params->dscp;
	}

	if (glob_cfg_bits & ECORE_RDMA_DCSP_EN_BIT_MASK) {
		if (in_params->dscp_en > 1) {
			DP_ERR(p_hwfn->p_dev, "invalid glob_dscp_en %d\n",
			       in_params->dscp_en);
			return ECORE_INVAL;
		}
		glob_cfg.dscp_en = in_params->dscp_en;
	}

	if (glob_cfg_bits & ECORE_RDMA_ECN_BIT_MASK) {
		if (in_params->ecn > INET_ECN_ECT_0) {
			DP_ERR(p_hwfn->p_dev, "invalid glob ecn %d\n",
			       in_params->ecn);
			return ECORE_INVAL;
		}
		glob_cfg.ecn = in_params->ecn;
	}

	if (glob_cfg_bits & ECORE_RDMA_ECN_EN_BIT_MASK) {
		if (in_params->ecn_en > 1) {
			DP_ERR(p_hwfn->p_dev, "invalid glob ecn en %d\n",
			       in_params->ecn_en);
			return ECORE_INVAL;
		}
		glob_cfg.ecn_en = in_params->ecn_en;
	}

	if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_BIT_MASK) {
		if (in_params->vlan_pri > MAX_VLAN_PRIO) {
			DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri %d\n",
			       in_params->vlan_pri);
			return ECORE_INVAL;
		}
		glob_cfg.vlan_pri = in_params->vlan_pri;
	}

	if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK) {
		if (in_params->vlan_pri_en > 1) {
			DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri en %d\n",
			       in_params->vlan_pri_en);
			return ECORE_INVAL;
		}
		glob_cfg.vlan_pri_en = in_params->vlan_pri_en;
	}

	/* Write back local cfg to global */
	OSAL_MEMCPY(&p_hwfn->p_rdma_info->glob_cfg, &glob_cfg,
		    sizeof(glob_cfg));

	return rc;
}

enum _ecore_status_t
ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *out_params)
{
	OSAL_MEMCPY(out_params, &p_hwfn->p_rdma_info->glob_cfg,
		    sizeof(struct ecore_rdma_glob_cfg));

	return ECORE_SUCCESS;
}
#endif /* LINUX_REMOVE */