1/*
2 * Copyright (c) 2018-2019 Cavium, Inc.
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
29 * File : ecore_iwarp.c
30 */
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include "bcm_osal.h"
35#include "ecore.h"
36#include "ecore_status.h"
37#include "ecore_sp_commands.h"
38#include "ecore_cxt.h"
39#include "ecore_rdma.h"
40#include "reg_addr.h"
41#include "ecore_hw.h"
42#include "ecore_hsi_iwarp.h"
43#include "ecore_ll2.h"
44#include "ecore_ooo.h"
45#ifndef LINUX_REMOVE
46#include "ecore_tcp_ip.h"
47#endif
48
49#ifdef _NTDDK_
50#pragma warning(push)
51#pragma warning(disable : 28123)
52#pragma warning(disable : 28167)
53#endif
54
55/* Default values used for MPA Rev 1 */
56#define ECORE_IWARP_ORD_DEFAULT 32
57#define ECORE_IWARP_IRD_DEFAULT 32
58
59#define ECORE_IWARP_MAX_FW_MSS  4120
60
61struct mpa_v2_hdr {
62	__be16 ird;
63	__be16 ord;
64};
65
66#define MPA_V2_PEER2PEER_MODEL	0x8000
67#define MPA_V2_SEND_RTR		0x4000 /* on ird */
68#define MPA_V2_READ_RTR		0x4000 /* on ord */
69#define MPA_V2_WRITE_RTR	0x8000
70#define MPA_V2_IRD_ORD_MASK	0x3FFF
71
72#define MPA_REV2(_mpa_rev) (_mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED)
73
74#define ECORE_IWARP_INVALID_TCP_CID 0xffffffff
75/* How many times fin will be sent before FW aborts and send RST */
76#define ECORE_IWARP_MAX_FIN_RT_DEFAULT 2
77#define ECORE_IWARP_RCV_WND_SIZE_MIN (0xffff)
78/* INTERNAL: These numbers are derived from BRB buffer sizes to obtain optimal performance */
79#define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_2_PORTS (200*1024)
80#define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_4_PORTS (100*1024)
81#define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_2_PORTS (150*1024)
82#define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_4_PORTS (90*1024)
83#define ECORE_IWARP_MAX_WND_SCALE    (14)
84/* Timestamp header is the length of the timestamp option (10):
85 * kind:8 bit, length:8 bit, timestamp:32 bit, ack: 32bit
86 * rounded up to a multiple of 4
87 */
88#define TIMESTAMP_HEADER_SIZE (12)
89
90static enum _ecore_status_t
91ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn,
92			u8 fw_event_code,
93			u16 OSAL_UNUSED echo,
94			union event_ring_data *data,
95			u8 fw_return_code);
96
97static enum _ecore_status_t
98ecore_iwarp_empty_ramrod(struct ecore_hwfn *p_hwfn,
99			 struct ecore_iwarp_listener *listener);
100
101static OSAL_INLINE struct ecore_iwarp_fpdu *
102ecore_iwarp_get_curr_fpdu(struct ecore_hwfn *p_hwfn, u16 cid);
103
104/* Override devinfo with iWARP specific values */
105void
106ecore_iwarp_init_devinfo(struct ecore_hwfn *p_hwfn)
107{
108	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
109
110	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
111	dev->max_qp = OSAL_MIN_T(u64,
112				 IWARP_MAX_QPS,
113				 p_hwfn->p_rdma_info->num_qps) -
114		ECORE_IWARP_PREALLOC_CNT;
115
116	dev->max_cq = dev->max_qp;
117
118	dev->max_qp_resp_rd_atomic_resc = ECORE_IWARP_IRD_DEFAULT;
119	dev->max_qp_req_rd_atomic_resc = ECORE_IWARP_ORD_DEFAULT;
120}
121
122enum _ecore_status_t
123ecore_iwarp_init_hw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
124{
125	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
126	ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
127	p_hwfn->b_rdma_enabled_in_prs = true;
128
129	return 0;
130}
131
132void
133ecore_iwarp_init_fw_ramrod(struct ecore_hwfn *p_hwfn,
134			   struct iwarp_init_func_ramrod_data *p_ramrod)
135{
136	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
137		   "ooo handle = %d\n",
138		   p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle);
139
140	p_ramrod->iwarp.ll2_ooo_q_index =
141		p_hwfn->hw_info.resc_start[ECORE_LL2_QUEUE] +
142		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
143
144	p_ramrod->tcp.max_fin_rt = ECORE_IWARP_MAX_FIN_RT_DEFAULT;
145	return;
146}
147
148static enum _ecore_status_t
149ecore_iwarp_alloc_cid(struct ecore_hwfn *p_hwfn, u32 *cid)
150{
151	enum _ecore_status_t rc;
152
153	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
154
155	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
156				      &p_hwfn->p_rdma_info->cid_map,
157				      cid);
158
159	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
160	*cid += ecore_cxt_get_proto_cid_start(p_hwfn,
161					      p_hwfn->p_rdma_info->proto);
162	if (rc != ECORE_SUCCESS) {
163		DP_NOTICE(p_hwfn, false, "Failed in allocating iwarp cid\n");
164		return rc;
165	}
166
167	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *cid);
168
169	if (rc != ECORE_SUCCESS) {
170		OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
171		*cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
172					     p_hwfn->p_rdma_info->proto);
173
174		ecore_bmap_release_id(p_hwfn,
175				      &p_hwfn->p_rdma_info->cid_map,
176				      *cid);
177
178		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
179	}
180
181	return rc;
182}
183
184static void
185ecore_iwarp_set_tcp_cid(struct ecore_hwfn *p_hwfn, u32 cid)
186{
187	cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
188					     p_hwfn->p_rdma_info->proto);
189
190	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
191	ecore_bmap_set_id(p_hwfn,
192			  &p_hwfn->p_rdma_info->tcp_cid_map,
193			  cid);
194	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
195}
196
197/* This function allocates a cid for passive tcp ( called from syn receive)
198 * the reason it's separate from the regular cid allocation is because it
199 * is assured that these cids already have ilt alloacted. They are preallocated
200 * to ensure that we won't need to allocate memory during syn processing
201 */
202static enum _ecore_status_t
203ecore_iwarp_alloc_tcp_cid(struct ecore_hwfn *p_hwfn, u32 *cid)
204{
205	enum _ecore_status_t rc;
206
207	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
208
209	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
210				      &p_hwfn->p_rdma_info->tcp_cid_map,
211				      cid);
212
213	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
214
215	*cid += ecore_cxt_get_proto_cid_start(p_hwfn,
216					      p_hwfn->p_rdma_info->proto);
217	if (rc != ECORE_SUCCESS) {
218		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
219			   "can't allocate iwarp tcp cid max-count=%d\n",
220			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
221
222		*cid = ECORE_IWARP_INVALID_TCP_CID;
223	}
224
225	return rc;
226}
227
228/* We have two cid maps, one for tcp which should be used only from passive
229 * syn processing and replacing a pre-allocated ep in the list. the second
230 * for active tcp and for QPs.
231 */
232static void ecore_iwarp_cid_cleaned(struct ecore_hwfn *p_hwfn, u32 cid)
233{
234	cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
235					     p_hwfn->p_rdma_info->proto);
236
237	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
238
239	if (cid < ECORE_IWARP_PREALLOC_CNT) {
240		ecore_bmap_release_id(p_hwfn,
241				      &p_hwfn->p_rdma_info->tcp_cid_map,
242				      cid);
243	} else {
244		ecore_bmap_release_id(p_hwfn,
245				      &p_hwfn->p_rdma_info->cid_map,
246				      cid);
247	}
248
249	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
250}
251
252enum _ecore_status_t
253ecore_iwarp_create_qp(struct ecore_hwfn *p_hwfn,
254		      struct ecore_rdma_qp *qp,
255		      struct ecore_rdma_create_qp_out_params *out_params)
256{
257	struct iwarp_create_qp_ramrod_data *p_ramrod;
258	struct ecore_sp_init_data init_data;
259	struct ecore_spq_entry *p_ent;
260	enum _ecore_status_t rc;
261	u16 physical_queue;
262	u32 cid;
263
264	qp->shared_queue =
265		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
266					&qp->shared_queue_phys_addr,
267					IWARP_SHARED_QUEUE_PAGE_SIZE);
268	if (!qp->shared_queue) {
269		DP_NOTICE(p_hwfn, false,
270			  "ecore iwarp create qp failed: cannot allocate memory (shared queue).\n");
271		return ECORE_NOMEM;
272	} else {
273		out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
274			IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
275		out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
276			IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
277		out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
278			IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
279		out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
280			IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
281	}
282
283	rc = ecore_iwarp_alloc_cid(p_hwfn, &cid);
284	if (rc != ECORE_SUCCESS)
285		goto err1;
286
287	qp->icid = (u16)cid;
288
289	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
290	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
291	init_data.cid = qp->icid;
292	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
293
294	rc = ecore_sp_init_request(p_hwfn, &p_ent,
295				   IWARP_RAMROD_CMD_ID_CREATE_QP,
296				   PROTOCOLID_IWARP, &init_data);
297	if (rc != ECORE_SUCCESS)
298		return rc;
299
300	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
301
302	SET_FIELD(p_ramrod->flags,
303		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
304		  qp->fmr_and_reserved_lkey);
305
306	SET_FIELD(p_ramrod->flags,
307		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP,
308		  qp->signal_all);
309
310	SET_FIELD(p_ramrod->flags,
311		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
312		  qp->incoming_rdma_read_en);
313
314	SET_FIELD(p_ramrod->flags,
315		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
316		  qp->incoming_rdma_write_en);
317
318	SET_FIELD(p_ramrod->flags,
319		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
320		  qp->incoming_atomic_en);
321
322	SET_FIELD(p_ramrod->flags,
323		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG,
324		  qp->use_srq);
325
326	p_ramrod->pd = qp->pd;
327	p_ramrod->sq_num_pages = qp->sq_num_pages;
328	p_ramrod->rq_num_pages = qp->rq_num_pages;
329
330	p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
331	p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
332
333	p_ramrod->cq_cid_for_sq =
334		OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
335				 qp->sq_cq_id);
336	p_ramrod->cq_cid_for_rq =
337		OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
338				 qp->rq_cq_id);
339
340	p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);
341
342	physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
343	p_ramrod->physical_q0 = OSAL_CPU_TO_LE16(physical_queue);
344	physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
345	p_ramrod->physical_q1 = OSAL_CPU_TO_LE16(physical_queue);
346
347	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
348
349	if (rc != ECORE_SUCCESS)
350		goto err1;
351
352	return rc;
353
354err1:
355	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
356			       qp->shared_queue,
357			       qp->shared_queue_phys_addr,
358			       IWARP_SHARED_QUEUE_PAGE_SIZE);
359
360	return rc;
361}
362
363static enum _ecore_status_t
364ecore_iwarp_modify_fw(struct ecore_hwfn *p_hwfn,
365		      struct ecore_rdma_qp *qp)
366{
367	struct iwarp_modify_qp_ramrod_data *p_ramrod;
368	struct ecore_sp_init_data init_data;
369	struct ecore_spq_entry *p_ent;
370	enum _ecore_status_t rc;
371
372	/* Get SPQ entry */
373	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
374	init_data.cid = qp->icid;
375	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
376	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
377
378	rc = ecore_sp_init_request(p_hwfn, &p_ent,
379				   IWARP_RAMROD_CMD_ID_MODIFY_QP,
380				   p_hwfn->p_rdma_info->proto,
381				   &init_data);
382	if (rc != ECORE_SUCCESS)
383		return rc;
384
385	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
386	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
387		  0x1);
388	if (qp->iwarp_state == ECORE_IWARP_QP_STATE_CLOSING)
389		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
390	else
391		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
392
393	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
394
395	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)rc=%d\n",
396		   qp->icid, rc);
397
398	return rc;
399}
400
401enum ecore_iwarp_qp_state
402ecore_roce2iwarp_state(enum ecore_roce_qp_state state)
403{
404	switch (state) {
405	case ECORE_ROCE_QP_STATE_RESET:
406	case ECORE_ROCE_QP_STATE_INIT:
407	case ECORE_ROCE_QP_STATE_RTR:
408		return ECORE_IWARP_QP_STATE_IDLE;
409	case ECORE_ROCE_QP_STATE_RTS:
410		return ECORE_IWARP_QP_STATE_RTS;
411	case ECORE_ROCE_QP_STATE_SQD:
412		return ECORE_IWARP_QP_STATE_CLOSING;
413	case ECORE_ROCE_QP_STATE_ERR:
414		return ECORE_IWARP_QP_STATE_ERROR;
415	case ECORE_ROCE_QP_STATE_SQE:
416		return ECORE_IWARP_QP_STATE_TERMINATE;
417	}
418	return ECORE_IWARP_QP_STATE_ERROR;
419}
420
421static enum ecore_roce_qp_state
422ecore_iwarp2roce_state(enum ecore_iwarp_qp_state state)
423{
424	switch (state) {
425	case ECORE_IWARP_QP_STATE_IDLE:
426		return ECORE_ROCE_QP_STATE_INIT;
427	case ECORE_IWARP_QP_STATE_RTS:
428		return ECORE_ROCE_QP_STATE_RTS;
429	case ECORE_IWARP_QP_STATE_TERMINATE:
430		return ECORE_ROCE_QP_STATE_SQE;
431	case ECORE_IWARP_QP_STATE_CLOSING:
432		return ECORE_ROCE_QP_STATE_SQD;
433	case ECORE_IWARP_QP_STATE_ERROR:
434		return ECORE_ROCE_QP_STATE_ERR;
435	}
436	return ECORE_ROCE_QP_STATE_ERR;
437}
438
439const char *iwarp_state_names[] = {
440	"IDLE",
441	"RTS",
442	"TERMINATE",
443	"CLOSING",
444	"ERROR",
445};
446
447enum _ecore_status_t
448ecore_iwarp_modify_qp(struct ecore_hwfn *p_hwfn,
449		      struct ecore_rdma_qp *qp,
450		      enum ecore_iwarp_qp_state new_state,
451		      bool internal)
452{
453	enum ecore_iwarp_qp_state prev_iw_state;
454	enum _ecore_status_t rc = 0;
455	bool modify_fw = false;
456
457	/* modify QP can be called from upper-layer or as a result of async
458	 * RST/FIN... therefore need to protect
459	 */
460	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
461	prev_iw_state = qp->iwarp_state;
462
463	if (prev_iw_state == new_state) {
464		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
465		return ECORE_SUCCESS;
466	}
467
468	switch (prev_iw_state) {
469	case ECORE_IWARP_QP_STATE_IDLE:
470		switch (new_state) {
471		case ECORE_IWARP_QP_STATE_RTS:
472			qp->iwarp_state = ECORE_IWARP_QP_STATE_RTS;
473			break;
474		case ECORE_IWARP_QP_STATE_ERROR:
475			qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR;
476			if (!internal)
477				modify_fw = true;
478			break;
479		default:
480			break;
481		}
482		break;
483	case ECORE_IWARP_QP_STATE_RTS:
484		switch (new_state) {
485		case ECORE_IWARP_QP_STATE_CLOSING:
486			if (!internal)
487				modify_fw = true;
488
489			qp->iwarp_state = ECORE_IWARP_QP_STATE_CLOSING;
490			break;
491		case ECORE_IWARP_QP_STATE_ERROR:
492			if (!internal)
493				modify_fw = true;
494			qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR;
495			break;
496		default:
497			break;
498		}
499		break;
500	case ECORE_IWARP_QP_STATE_ERROR:
501		switch (new_state) {
502		case ECORE_IWARP_QP_STATE_IDLE:
503			/* TODO: destroy flow -> need to destroy EP&QP */
504			qp->iwarp_state = new_state;
505			break;
506		case ECORE_IWARP_QP_STATE_CLOSING:
507			/* could happen due to race... do nothing.... */
508			break;
509		default:
510			rc = ECORE_INVAL;
511		}
512		break;
513	case ECORE_IWARP_QP_STATE_TERMINATE:
514	case ECORE_IWARP_QP_STATE_CLOSING:
515		qp->iwarp_state = new_state;
516		break;
517	default:
518		break;
519	}
520
521	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) %s --> %s %s\n",
522		   qp->icid,
523		   iwarp_state_names[prev_iw_state],
524		   iwarp_state_names[qp->iwarp_state],
525		   internal ? "internal" : " ");
526
527	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
528
529	if (modify_fw)
530		ecore_iwarp_modify_fw(p_hwfn, qp);
531
532	return rc;
533}
534
535enum _ecore_status_t
536ecore_iwarp_fw_destroy(struct ecore_hwfn *p_hwfn,
537		       struct ecore_rdma_qp *qp)
538{
539	struct ecore_sp_init_data init_data;
540	struct ecore_spq_entry *p_ent;
541	enum _ecore_status_t rc;
542
543	/* Get SPQ entry */
544	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
545	init_data.cid = qp->icid;
546	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
547	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
548
549	rc = ecore_sp_init_request(p_hwfn, &p_ent,
550				   IWARP_RAMROD_CMD_ID_DESTROY_QP,
551				   p_hwfn->p_rdma_info->proto,
552				   &init_data);
553	if (rc != ECORE_SUCCESS)
554		return rc;
555
556	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
557
558	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) rc = %d\n",  qp->icid, rc);
559
560	return rc;
561}
562
563static void ecore_iwarp_destroy_ep(struct ecore_hwfn *p_hwfn,
564				   struct ecore_iwarp_ep *ep,
565				   bool remove_from_active_list)
566{
567	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
568			       ep->ep_buffer_virt,
569			       ep->ep_buffer_phys,
570			       sizeof(*ep->ep_buffer_virt));
571
572	if (remove_from_active_list) {
573		OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
574
575		OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
576				       &p_hwfn->p_rdma_info->iwarp.ep_list);
577
578		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
579	}
580
581	if (ep->qp)
582		ep->qp->ep = OSAL_NULL;
583
584	OSAL_FREE(p_hwfn->p_dev, ep);
585}
586
587enum _ecore_status_t
588ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn,
589		       struct ecore_rdma_qp *qp)
590{
591	enum _ecore_status_t rc = ECORE_SUCCESS;
592	struct ecore_iwarp_ep *ep = qp->ep;
593	struct ecore_iwarp_fpdu *fpdu;
594	int wait_count = 0;
595
596	fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, qp->icid);
597	if (fpdu && fpdu->incomplete_bytes)
598		DP_NOTICE(p_hwfn, false,
599			  "Pending Partial fpdu with incomplete bytes=%d\n",
600			  fpdu->incomplete_bytes);
601
602	if (qp->iwarp_state != ECORE_IWARP_QP_STATE_ERROR) {
603		rc = ecore_iwarp_modify_qp(p_hwfn, qp,
604					   ECORE_IWARP_QP_STATE_ERROR,
605					   false);
606
607		if (rc != ECORE_SUCCESS)
608			return rc;
609	}
610
611	/* Make sure ep is closed before returning and freeing memory. */
612	if (ep) {
613		while (ep->state != ECORE_IWARP_EP_CLOSED) {
614			DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
615				   "Waiting for ep->state to be closed...state=%x\n",
616				   ep->state);
617
618			OSAL_MSLEEP(100);
619			if (wait_count++ > 200) {
620				DP_NOTICE(p_hwfn, false, "ep state close timeout state=%x\n",
621					  ep->state);
622				break;
623			}
624		}
625
626		ecore_iwarp_destroy_ep(p_hwfn, ep, false);
627	}
628
629	rc = ecore_iwarp_fw_destroy(p_hwfn, qp);
630
631	if (qp->shared_queue)
632		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
633				       qp->shared_queue,
634				       qp->shared_queue_phys_addr,
635				       IWARP_SHARED_QUEUE_PAGE_SIZE);
636
637	return rc;
638}
639
640static enum _ecore_status_t
641ecore_iwarp_create_ep(struct ecore_hwfn *p_hwfn,
642		      struct ecore_iwarp_ep **ep_out)
643{
644	struct ecore_iwarp_ep *ep;
645	enum _ecore_status_t rc;
646
647	ep = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*ep));
648	if (!ep) {
649		DP_NOTICE(p_hwfn, false,
650			  "ecore create ep failed: cannot allocate memory (ep). rc = %d\n",
651			  ECORE_NOMEM);
652		return ECORE_NOMEM;
653	}
654
655	ep->state = ECORE_IWARP_EP_INIT;
656
657	/* ep_buffer is allocated once and is structured as follows:
658	 * [MAX_PRIV_DATA_LEN][MAX_PRIV_DATA_LEN][union async_output]
659	 * We could have allocated this in three calls but since all together
660	 * it is less than a page, we do one allocation and initialize pointers
661	 * accordingly
662	 */
663	ep->ep_buffer_virt = OSAL_DMA_ALLOC_COHERENT(
664		p_hwfn->p_dev,
665		&ep->ep_buffer_phys,
666		sizeof(*ep->ep_buffer_virt));
667
668	if (!ep->ep_buffer_virt) {
669		DP_NOTICE(p_hwfn, false,
670			  "ecore create ep failed: cannot allocate memory (ulp buffer). rc = %d\n",
671			  ECORE_NOMEM);
672		rc = ECORE_NOMEM;
673		goto err;
674	}
675
676	ep->sig = 0xdeadbeef;
677
678	*ep_out = ep;
679
680	return ECORE_SUCCESS;
681
682err:
683	OSAL_FREE(p_hwfn->p_dev, ep);
684	return rc;
685}
686
687static void
688ecore_iwarp_print_tcp_ramrod(struct ecore_hwfn *p_hwfn,
689			     struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
690{
691	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, ">>> PRINT TCP RAMROD\n");
692
693	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_mac=%x %x %x\n",
694		   p_tcp_ramrod->tcp.local_mac_addr_lo,
695		   p_tcp_ramrod->tcp.local_mac_addr_mid,
696		   p_tcp_ramrod->tcp.local_mac_addr_hi);
697
698	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_mac=%x %x %x\n",
699		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
700		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
701		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
702
703	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "vlan_id=%x\n",
704		   p_tcp_ramrod->tcp.vlan_id);
705	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flags=%x\n",
706		   p_tcp_ramrod->tcp.flags);
707
708	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ip_version=%x\n",
709		   p_tcp_ramrod->tcp.ip_version);
710	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_ip=%x.%x.%x.%x\n",
711		   p_tcp_ramrod->tcp.local_ip[0],
712		   p_tcp_ramrod->tcp.local_ip[1],
713		   p_tcp_ramrod->tcp.local_ip[2],
714		   p_tcp_ramrod->tcp.local_ip[3]);
715	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_ip=%x.%x.%x.%x\n",
716		   p_tcp_ramrod->tcp.remote_ip[0],
717		   p_tcp_ramrod->tcp.remote_ip[1],
718		   p_tcp_ramrod->tcp.remote_ip[2],
719		   p_tcp_ramrod->tcp.remote_ip[3]);
720	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flow_label=%x\n",
721		   p_tcp_ramrod->tcp.flow_label);
722	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ttl=%x\n",
723		   p_tcp_ramrod->tcp.ttl);
724	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "tos_or_tc=%x\n",
725		   p_tcp_ramrod->tcp.tos_or_tc);
726	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_port=%x\n",
727		   p_tcp_ramrod->tcp.local_port);
728	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_port=%x\n",
729		   p_tcp_ramrod->tcp.remote_port);
730	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "mss=%x\n",
731		   p_tcp_ramrod->tcp.mss);
732	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rcv_wnd_scale=%x\n",
733		   p_tcp_ramrod->tcp.rcv_wnd_scale);
734	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "connect_mode=%x\n",
735		   p_tcp_ramrod->tcp.connect_mode);
736	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_ip_payload_length=%x\n",
737		   p_tcp_ramrod->tcp.syn_ip_payload_length);
738	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_lo=%x\n",
739		   p_tcp_ramrod->tcp.syn_phy_addr_lo);
740	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_hi=%x\n",
741		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
742
743	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "<<<f  PRINT TCP RAMROD\n");
744}
745
746/* Default values for tcp option2 */
747#define ECORE_IWARP_DEF_MAX_RT_TIME (0)
748#define ECORE_IWARP_DEF_CWND_FACTOR (4)
749#define ECORE_IWARP_DEF_KA_MAX_PROBE_CNT (5)
750#define ECORE_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
751#define ECORE_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
752
753static enum _ecore_status_t
754ecore_iwarp_tcp_offload(struct ecore_hwfn *p_hwfn,
755			struct ecore_iwarp_ep *ep)
756{
757	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
758	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
759	struct ecore_sp_init_data init_data;
760	struct ecore_spq_entry *p_ent;
761	dma_addr_t async_output_phys;
762	dma_addr_t in_pdata_phys;
763	enum _ecore_status_t rc;
764	u16 physical_q;
765	u8 tcp_flags;
766	int i;
767
768	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
769	init_data.cid = ep->tcp_cid;
770	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
771
772	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
773		init_data.comp_mode = ECORE_SPQ_MODE_CB;
774	} else {
775		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
776	}
777
778	rc = ecore_sp_init_request(p_hwfn, &p_ent,
779				   IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
780				   PROTOCOLID_IWARP, &init_data);
781	if (rc != ECORE_SUCCESS)
782		return rc;
783
784	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
785
786	/* Point to the "second half" of the ulp buffer */
787	in_pdata_phys = ep->ep_buffer_phys +
788		OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata);
789	p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.hi =
790		DMA_HI_LE(in_pdata_phys);
791	p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.lo =
792		DMA_LO_LE(in_pdata_phys);
793	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
794		OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata));
795
796	async_output_phys = ep->ep_buffer_phys +
797		OFFSETOF(struct ecore_iwarp_ep_memory, async_output);
798
799	p_tcp_ramrod->iwarp.async_eqe_output_buf.hi =
800		DMA_HI_LE(async_output_phys);
801	p_tcp_ramrod->iwarp.async_eqe_output_buf.lo =
802		DMA_LO_LE(async_output_phys);
803	p_tcp_ramrod->iwarp.handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep));
804	p_tcp_ramrod->iwarp.handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep));
805
806	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
807	p_tcp_ramrod->iwarp.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
808	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
809	p_tcp_ramrod->iwarp.physical_q1 = OSAL_CPU_TO_LE16(physical_q);
810	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
811
812	ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.remote_mac_addr_hi,
813			      &p_tcp_ramrod->tcp.remote_mac_addr_mid,
814			      &p_tcp_ramrod->tcp.remote_mac_addr_lo,
815			      ep->remote_mac_addr);
816	ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.local_mac_addr_hi,
817			      &p_tcp_ramrod->tcp.local_mac_addr_mid,
818			      &p_tcp_ramrod->tcp.local_mac_addr_lo,
819			      ep->local_mac_addr);
820
821	p_tcp_ramrod->tcp.vlan_id = OSAL_CPU_TO_LE16(ep->cm_info.vlan);
822
823	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
824	p_tcp_ramrod->tcp.flags = 0;
825	SET_FIELD(p_tcp_ramrod->tcp.flags,
826		  TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
827		  !!(tcp_flags & ECORE_IWARP_TS_EN));
828
829	SET_FIELD(p_tcp_ramrod->tcp.flags,
830		  TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
831		  !!(tcp_flags & ECORE_IWARP_DA_EN));
832
833	p_tcp_ramrod->tcp.ip_version = ep->cm_info.ip_version;
834
835	for (i = 0; i < 4; i++) {
836		p_tcp_ramrod->tcp.remote_ip[i] =
837			OSAL_CPU_TO_LE32(ep->cm_info.remote_ip[i]);
838		p_tcp_ramrod->tcp.local_ip[i] =
839			OSAL_CPU_TO_LE32(ep->cm_info.local_ip[i]);
840	}
841
842	p_tcp_ramrod->tcp.remote_port =
843		OSAL_CPU_TO_LE16(ep->cm_info.remote_port);
844	p_tcp_ramrod->tcp.local_port = OSAL_CPU_TO_LE16(ep->cm_info.local_port);
845	p_tcp_ramrod->tcp.mss = OSAL_CPU_TO_LE16(ep->mss);
846	p_tcp_ramrod->tcp.flow_label = 0;
847	p_tcp_ramrod->tcp.ttl = 0x40;
848	p_tcp_ramrod->tcp.tos_or_tc = 0;
849
850	p_tcp_ramrod->tcp.max_rt_time = ECORE_IWARP_DEF_MAX_RT_TIME;
851	p_tcp_ramrod->tcp.cwnd = ECORE_IWARP_DEF_CWND_FACTOR * p_tcp_ramrod->tcp.mss;
852	p_tcp_ramrod->tcp.ka_max_probe_cnt = ECORE_IWARP_DEF_KA_MAX_PROBE_CNT;
853	p_tcp_ramrod->tcp.ka_timeout = ECORE_IWARP_DEF_KA_TIMEOUT;
854	p_tcp_ramrod->tcp.ka_interval = ECORE_IWARP_DEF_KA_INTERVAL;
855
856	p_tcp_ramrod->tcp.rcv_wnd_scale =
857		(u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
858	p_tcp_ramrod->tcp.connect_mode = ep->connect_mode;
859
860	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
861		p_tcp_ramrod->tcp.syn_ip_payload_length =
862			OSAL_CPU_TO_LE16(ep->syn_ip_payload_length);
863		p_tcp_ramrod->tcp.syn_phy_addr_hi =
864			DMA_HI_LE(ep->syn_phy_addr);
865		p_tcp_ramrod->tcp.syn_phy_addr_lo =
866			DMA_LO_LE(ep->syn_phy_addr);
867	}
868
869	ecore_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
870
871	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
872
873	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
874		   "EP(0x%x) Offload completed rc=%d\n" , ep->tcp_cid, rc);
875
876	return rc;
877}
878
879/* This function should be called after IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE
880 * is received. it will be called from the dpc context.
881 */
882static enum _ecore_status_t
883ecore_iwarp_mpa_offload(struct ecore_hwfn *p_hwfn,
884			struct ecore_iwarp_ep *ep)
885{
886	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
887	struct ecore_iwarp_info *iwarp_info;
888	struct ecore_sp_init_data init_data;
889	struct ecore_spq_entry *p_ent;
890	dma_addr_t async_output_phys;
891	dma_addr_t out_pdata_phys;
892	dma_addr_t in_pdata_phys;
893	struct ecore_rdma_qp *qp;
894	bool reject;
895	enum _ecore_status_t rc;
896
897	if (!ep)
898		return ECORE_INVAL;
899
900	qp = ep->qp;
901	reject = (qp == OSAL_NULL);
902
903	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
904	init_data.cid = reject ? ep->tcp_cid : qp->icid;
905	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
906
907	if (ep->connect_mode == TCP_CONNECT_ACTIVE || !ep->event_cb)
908		init_data.comp_mode = ECORE_SPQ_MODE_CB;
909	else
910		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
911
912	rc = ecore_sp_init_request(p_hwfn, &p_ent,
913				   IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
914				   PROTOCOLID_IWARP, &init_data);
915
916	if (rc != ECORE_SUCCESS)
917		return rc;
918
919	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
920	out_pdata_phys = ep->ep_buffer_phys +
921		OFFSETOF(struct ecore_iwarp_ep_memory, out_pdata);
922	p_mpa_ramrod->common.outgoing_ulp_buffer.addr.hi =
923		DMA_HI_LE(out_pdata_phys);
924	p_mpa_ramrod->common.outgoing_ulp_buffer.addr.lo =
925		DMA_LO_LE(out_pdata_phys);
926	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
927		ep->cm_info.private_data_len;
928	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
929
930	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
931	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
932
933	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
934
935	in_pdata_phys = ep->ep_buffer_phys +
936		OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata);
937	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
938	p_mpa_ramrod->incoming_ulp_buffer.addr.hi =
939		DMA_HI_LE(in_pdata_phys);
940	p_mpa_ramrod->incoming_ulp_buffer.addr.lo =
941		DMA_LO_LE(in_pdata_phys);
942	p_mpa_ramrod->incoming_ulp_buffer.len =
943		OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata));
944	async_output_phys = ep->ep_buffer_phys +
945		OFFSETOF(struct ecore_iwarp_ep_memory, async_output);
946	p_mpa_ramrod->async_eqe_output_buf.hi =
947		DMA_HI_LE(async_output_phys);
948	p_mpa_ramrod->async_eqe_output_buf.lo =
949		DMA_LO_LE(async_output_phys);
950	p_mpa_ramrod->handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep));
951	p_mpa_ramrod->handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep));
952
953	if (!reject) {
954		p_mpa_ramrod->shared_queue_addr.hi =
955			DMA_HI_LE(qp->shared_queue_phys_addr);
956		p_mpa_ramrod->shared_queue_addr.lo =
957			DMA_LO_LE(qp->shared_queue_phys_addr);
958
959		p_mpa_ramrod->stats_counter_id =
960			RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
961			qp->stats_queue;
962	} else {
963		p_mpa_ramrod->common.reject = 1;
964	}
965
966	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
967	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
968	p_mpa_ramrod->mode = ep->mpa_rev;
969	SET_FIELD(p_mpa_ramrod->rtr_pref,
970		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED,
971		  ep->rtr_type);
972
973	ep->state = ECORE_IWARP_EP_MPA_OFFLOADED;
974	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
975	if (!reject)
976		ep->cid = qp->icid; /* Now they're migrated. */
977
978	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
979		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
980		   reject ? 0xffff : qp->icid, ep->tcp_cid, rc, ep->cm_info.ird,
981		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
982	return rc;
983}
984
985static void
986ecore_iwarp_mpa_received(struct ecore_hwfn *p_hwfn,
987			 struct ecore_iwarp_ep *ep)
988{
989	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
990	struct ecore_iwarp_cm_event_params params;
991	struct mpa_v2_hdr *mpa_v2_params;
992	union async_output *async_data;
993	u16 mpa_ord, mpa_ird;
994	u8 mpa_hdr_size = 0;
995	u8 mpa_rev;
996
997	async_data = &ep->ep_buffer_virt->async_output;
998
999	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
1000	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1001		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
1002		   async_data->mpa_request.ulp_data_len,
1003		   mpa_rev,
1004		   *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata)));
1005
1006	if (ep->listener->state > ECORE_IWARP_LISTENER_STATE_UNPAUSE) {
1007		/* MPA reject initiated by ecore */
1008		OSAL_MEMSET(&ep->cm_info, 0, sizeof(ep->cm_info));
1009		ep->event_cb = OSAL_NULL;
1010		ecore_iwarp_mpa_offload(p_hwfn, ep);
1011		return;
1012	}
1013
1014	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1015		if (iwarp_info->mpa_rev == MPA_NEGOTIATION_TYPE_BASIC) {
1016			DP_ERR(p_hwfn, "MPA_NEGOTIATE Received MPA rev 2 on driver supporting only MPA rev 1\n");
1017			/* MPA_REV2 ToDo: close the tcp connection. */
1018			return;
1019		}
1020
1021		/* Read ord/ird values from private data buffer */
1022		mpa_v2_params =
1023			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
1024		mpa_hdr_size = sizeof(*mpa_v2_params);
1025
1026		mpa_ord = ntohs(mpa_v2_params->ord);
1027		mpa_ird = ntohs(mpa_v2_params->ird);
1028
1029		/* Temprary store in cm_info incoming ord/ird requested, later
1030		 * replace with negotiated value during accept
1031		 */
1032		ep->cm_info.ord = (u8)OSAL_MIN_T(u16,
1033						(mpa_ord & MPA_V2_IRD_ORD_MASK),
1034						ECORE_IWARP_ORD_DEFAULT);
1035
1036		ep->cm_info.ird = (u8)OSAL_MIN_T(u16,
1037						(mpa_ird & MPA_V2_IRD_ORD_MASK),
1038						ECORE_IWARP_IRD_DEFAULT);
1039
1040		/* Peer2Peer negotiation */
1041		ep->rtr_type = MPA_RTR_TYPE_NONE;
1042		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
1043			if (mpa_ord & MPA_V2_WRITE_RTR)
1044				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
1045
1046			if (mpa_ord & MPA_V2_READ_RTR)
1047				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
1048
1049			if (mpa_ird & MPA_V2_SEND_RTR)
1050				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
1051
1052			ep->rtr_type &= iwarp_info->rtr_type;
1053			/* if we're left with no match send our capabilities */
1054			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
1055				ep->rtr_type = iwarp_info->rtr_type;
1056
1057			/* prioritize write over send and read */
1058			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1059					ep->rtr_type = MPA_RTR_TYPE_ZERO_WRITE;
1060		}
1061
1062		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
1063	} else {
1064		ep->cm_info.ord = ECORE_IWARP_ORD_DEFAULT;
1065		ep->cm_info.ird = ECORE_IWARP_IRD_DEFAULT;
1066		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
1067	}
1068
1069	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1070		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
1071		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
1072		   async_data->mpa_request.ulp_data_len,
1073		   mpa_hdr_size);
1074
1075	/* Strip mpa v2 hdr from private data before sending to upper layer */
1076	ep->cm_info.private_data =
1077		ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
1078
1079	ep->cm_info.private_data_len =
1080		async_data->mpa_request.ulp_data_len - mpa_hdr_size;
1081
1082	params.event = ECORE_IWARP_EVENT_MPA_REQUEST;
1083	params.cm_info = &ep->cm_info;
1084	params.ep_context = ep;
1085	params.status = ECORE_SUCCESS;
1086
1087	ep->state = ECORE_IWARP_EP_MPA_REQ_RCVD;
1088	ep->event_cb(ep->cb_context, &params);
1089}
1090
1091static void
1092ecore_iwarp_move_to_ep_list(struct ecore_hwfn *p_hwfn,
1093			    osal_list_t *list, struct ecore_iwarp_ep *ep)
1094{
1095	OSAL_SPIN_LOCK(&ep->listener->lock);
1096	OSAL_LIST_REMOVE_ENTRY(&ep->list_entry, &ep->listener->ep_list);
1097	OSAL_SPIN_UNLOCK(&ep->listener->lock);
1098	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1099	OSAL_LIST_PUSH_TAIL(&ep->list_entry, list);
1100	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1101}
1102
1103static void
1104ecore_iwarp_return_ep(struct ecore_hwfn *p_hwfn,
1105		      struct ecore_iwarp_ep *ep)
1106{
1107	ep->state = ECORE_IWARP_EP_INIT;
1108	if (ep->qp)
1109		ep->qp->ep = OSAL_NULL;
1110	ep->qp = OSAL_NULL;
1111	OSAL_MEMSET(&ep->cm_info, 0, sizeof(ep->cm_info));
1112
1113	if (ep->tcp_cid == ECORE_IWARP_INVALID_TCP_CID) {
1114		/* We don't care about the return code, it's ok if tcp_cid
1115		 * remains invalid...in this case we'll defer allocation
1116		 */
1117		ecore_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1118	}
1119
1120	ecore_iwarp_move_to_ep_list(p_hwfn,
1121				    &p_hwfn->p_rdma_info->iwarp.ep_free_list,
1122				    ep);
1123}
1124
1125static void
1126ecore_iwarp_parse_private_data(struct ecore_hwfn *p_hwfn,
1127			       struct ecore_iwarp_ep *ep)
1128{
1129	struct mpa_v2_hdr *mpa_v2_params;
1130	union async_output *async_data;
1131	u16 mpa_ird, mpa_ord;
1132	u8 mpa_data_size = 0;
1133
1134	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
1135		mpa_v2_params = (struct mpa_v2_hdr *)
1136		((u8 *)ep->ep_buffer_virt->in_pdata);
1137		mpa_data_size = sizeof(*mpa_v2_params);
1138		mpa_ird = ntohs(mpa_v2_params->ird);
1139		mpa_ord = ntohs(mpa_v2_params->ord);
1140
1141		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
1142		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
1143	} /* else: Ord / Ird already configured */
1144
1145	async_data = &ep->ep_buffer_virt->async_output;
1146
1147	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
1148	ep->cm_info.private_data_len =
1149		async_data->mpa_response.ulp_data_len - mpa_data_size;
1150}
1151
1152static void
1153ecore_iwarp_mpa_reply_arrived(struct ecore_hwfn *p_hwfn,
1154			      struct ecore_iwarp_ep *ep)
1155{
1156	struct ecore_iwarp_cm_event_params params;
1157
1158	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
1159		DP_NOTICE(p_hwfn, true, "MPA reply event not expected on passive side!\n");
1160		return;
1161	}
1162
1163	params.event = ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY;
1164
1165	ecore_iwarp_parse_private_data(p_hwfn, ep);
1166
1167	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1168		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1169		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1170
1171	params.cm_info = &ep->cm_info;
1172	params.ep_context = ep;
1173	params.status = ECORE_SUCCESS;
1174
1175	ep->mpa_reply_processed = true;
1176
1177	ep->event_cb(ep->cb_context, &params);
1178}
1179
1180#define ECORE_IWARP_CONNECT_MODE_STRING(ep) \
1181	(ep->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
1182
1183/* Called as a result of the event:
1184 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1185 */
1186static void
1187ecore_iwarp_mpa_complete(struct ecore_hwfn *p_hwfn,
1188			 struct ecore_iwarp_ep *ep,
1189			 u8 fw_return_code)
1190{
1191	struct ecore_iwarp_cm_event_params params;
1192
1193	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1194		params.event = ECORE_IWARP_EVENT_ACTIVE_COMPLETE;
1195	else
1196		params.event = ECORE_IWARP_EVENT_PASSIVE_COMPLETE;
1197
1198	if (ep->connect_mode == TCP_CONNECT_ACTIVE &&
1199	    !ep->mpa_reply_processed) {
1200		ecore_iwarp_parse_private_data(p_hwfn, ep);
1201	}
1202
1203	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1204		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1205		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1206
1207	params.cm_info = &ep->cm_info;
1208
1209	params.ep_context = ep;
1210
1211	if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1212	    (ep->state != ECORE_IWARP_EP_MPA_OFFLOADED)) {
1213		/* This is a FW bug. Shouldn't get complete without offload */
1214		DP_NOTICE(p_hwfn, false, "%s(0x%x) ERROR: Got MPA complete without MPA offload fw_return_code=%d ep->state=%d\n",
1215			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid,
1216			  fw_return_code, ep->state);
1217		ep->state = ECORE_IWARP_EP_CLOSED;
1218		return;
1219	}
1220
1221	if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1222	    (ep->state == ECORE_IWARP_EP_ABORTING))
1223		return;
1224
1225	ep->state = ECORE_IWARP_EP_CLOSED;
1226
1227	switch (fw_return_code) {
1228	case RDMA_RETURN_OK:
1229		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1230		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1231		ecore_iwarp_modify_qp(p_hwfn, ep->qp,
1232				      ECORE_IWARP_QP_STATE_RTS,
1233				      1);
1234		ep->state = ECORE_IWARP_EP_ESTABLISHED;
1235		params.status = ECORE_SUCCESS;
1236		break;
1237	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1238		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA timeout\n",
1239			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1240		params.status = ECORE_TIMEOUT;
1241		break;
1242	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1243		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Reject\n",
1244			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1245		params.status = ECORE_CONN_REFUSED;
1246		break;
1247	case IWARP_CONN_ERROR_MPA_RST:
1248		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1249			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1250			  ep->tcp_cid);
1251		params.status = ECORE_CONN_RESET;
1252		break;
1253	case IWARP_CONN_ERROR_MPA_FIN:
1254		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA received FIN\n",
1255			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1256		params.status = ECORE_CONN_REFUSED;
1257		break;
1258	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1259		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA insufficient ird\n",
1260			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1261		params.status = ECORE_CONN_REFUSED;
1262		break;
1263	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1264		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA RTR MISMATCH\n",
1265			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1266		params.status = ECORE_CONN_REFUSED;
1267		break;
1268	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1269		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Invalid Packet\n",
1270			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1271		params.status = ECORE_CONN_REFUSED;
1272		break;
1273	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1274		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Local Error\n",
1275			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1276		params.status = ECORE_CONN_REFUSED;
1277		break;
1278	case IWARP_CONN_ERROR_MPA_TERMINATE:
1279		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA TERMINATE\n",
1280			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1281		params.status = ECORE_CONN_REFUSED;
1282		break;
1283	default:
1284		params.status = ECORE_CONN_RESET;
1285		break;
1286	}
1287
1288	if (ep->event_cb)
1289		ep->event_cb(ep->cb_context, &params);
1290
1291	/* on passive side, if there is no associated QP (REJECT) we need to
1292	 * return the ep to the pool, otherwise we wait for QP to release it.
1293	 * Since we add an element in accept instead of this one. in anycase
1294	 * we need to remove it from the ep_list (active connections)...
1295	 */
1296	if (fw_return_code != RDMA_RETURN_OK) {
1297		ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1298		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1299		    (ep->qp == OSAL_NULL)) { /* Rejected */
1300			ecore_iwarp_return_ep(p_hwfn, ep);
1301		} else {
1302			OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1303			OSAL_LIST_REMOVE_ENTRY(
1304				&ep->list_entry,
1305				&p_hwfn->p_rdma_info->iwarp.ep_list);
1306			OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1307		}
1308	}
1309}
1310
1311static void
1312ecore_iwarp_mpa_v2_set_private(struct ecore_hwfn *p_hwfn,
1313			       struct ecore_iwarp_ep *ep,
1314			       u8 *mpa_data_size)
1315{
1316	struct mpa_v2_hdr *mpa_v2_params;
1317	u16 mpa_ird, mpa_ord;
1318
1319	*mpa_data_size = 0;
1320	if (MPA_REV2(ep->mpa_rev)) {
1321		mpa_v2_params =
1322			(struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1323		*mpa_data_size = sizeof(*mpa_v2_params);
1324
1325		mpa_ird = (u16)ep->cm_info.ird;
1326		mpa_ord = (u16)ep->cm_info.ord;
1327
1328		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1329			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1330
1331			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1332				mpa_ird |= MPA_V2_SEND_RTR;
1333
1334			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1335				mpa_ord |= MPA_V2_WRITE_RTR;
1336
1337			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1338				mpa_ord |= MPA_V2_READ_RTR;
1339		}
1340
1341		mpa_v2_params->ird = htons(mpa_ird);
1342		mpa_v2_params->ord = htons(mpa_ord);
1343
1344		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1345			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1346			   mpa_v2_params->ird,
1347			   mpa_v2_params->ord,
1348			   *((u32 *)mpa_v2_params),
1349			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1350			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1351			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1352			   !!(mpa_ird & MPA_V2_SEND_RTR),
1353			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1354			   !!(mpa_ord & MPA_V2_READ_RTR));
1355	}
1356}
1357
1358enum _ecore_status_t
1359ecore_iwarp_connect(void *rdma_cxt,
1360		    struct ecore_iwarp_connect_in *iparams,
1361		    struct ecore_iwarp_connect_out *oparams)
1362{
1363	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1364	struct ecore_iwarp_info *iwarp_info;
1365	struct ecore_iwarp_ep *ep;
1366	enum _ecore_status_t rc;
1367	u8 mpa_data_size = 0;
1368	u8 ts_hdr_size = 0;
1369	u32 cid;
1370
1371	if ((iparams->cm_info.ord > ECORE_IWARP_ORD_DEFAULT) ||
1372	    (iparams->cm_info.ird > ECORE_IWARP_IRD_DEFAULT)) {
1373		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1374			   "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1375			   iparams->qp->icid, iparams->cm_info.ord,
1376			   iparams->cm_info.ird);
1377
1378		return ECORE_INVAL;
1379	}
1380
1381	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1382
1383	/* Allocate ep object */
1384	rc = ecore_iwarp_alloc_cid(p_hwfn, &cid);
1385	if (rc != ECORE_SUCCESS)
1386		return rc;
1387
1388	if (iparams->qp->ep == OSAL_NULL) {
1389		rc = ecore_iwarp_create_ep(p_hwfn, &ep);
1390		if (rc != ECORE_SUCCESS)
1391			return rc;
1392	} else {
1393		ep = iparams->qp->ep;
1394		DP_ERR(p_hwfn, "Note re-use of QP for different connect\n");
1395		ep->state = ECORE_IWARP_EP_INIT;
1396	}
1397
1398	ep->tcp_cid = cid;
1399
1400	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1401	OSAL_LIST_PUSH_TAIL(&ep->list_entry,
1402			    &p_hwfn->p_rdma_info->iwarp.ep_list);
1403	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1404
1405	ep->qp = iparams->qp;
1406	ep->qp->ep = ep;
1407	OSAL_MEMCPY(ep->remote_mac_addr,
1408		    iparams->remote_mac_addr,
1409		    ETH_ALEN);
1410	OSAL_MEMCPY(ep->local_mac_addr,
1411		    iparams->local_mac_addr,
1412		    ETH_ALEN);
1413	OSAL_MEMCPY(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1414
1415	ep->cm_info.ord = iparams->cm_info.ord;
1416	ep->cm_info.ird = iparams->cm_info.ird;
1417
1418	ep->rtr_type = iwarp_info->rtr_type;
1419	if (iwarp_info->peer2peer == 0)
1420		ep->rtr_type = MPA_RTR_TYPE_NONE;
1421
1422	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1423	    (ep->cm_info.ord == 0))
1424		ep->cm_info.ord = 1;
1425
1426	ep->mpa_rev = iwarp_info->mpa_rev;
1427
1428	ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1429
1430	ep->cm_info.private_data = (u8 *)ep->ep_buffer_virt->out_pdata;
1431	ep->cm_info.private_data_len =
1432		iparams->cm_info.private_data_len + mpa_data_size;
1433
1434	OSAL_MEMCPY((u8 *)(u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1435		    iparams->cm_info.private_data,
1436		    iparams->cm_info.private_data_len);
1437
1438	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & ECORE_IWARP_TS_EN)
1439		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
1440
1441	ep->mss = iparams->mss - ts_hdr_size;
1442	ep->mss = OSAL_MIN_T(u16, ECORE_IWARP_MAX_FW_MSS, ep->mss);
1443
1444	ep->event_cb = iparams->event_cb;
1445	ep->cb_context = iparams->cb_context;
1446	ep->connect_mode = TCP_CONNECT_ACTIVE;
1447
1448	oparams->ep_context = ep;
1449
1450	rc = ecore_iwarp_tcp_offload(p_hwfn, ep);
1451
1452	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1453		   iparams->qp->icid, ep->tcp_cid, rc);
1454
1455	if (rc != ECORE_SUCCESS)
1456		ecore_iwarp_destroy_ep(p_hwfn, ep, true);
1457
1458	return rc;
1459}
1460
1461static struct ecore_iwarp_ep *
1462ecore_iwarp_get_free_ep(struct ecore_hwfn *p_hwfn)
1463{
1464	struct ecore_iwarp_ep *ep = OSAL_NULL;
1465	enum _ecore_status_t rc;
1466
1467	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1468
1469	if (OSAL_LIST_IS_EMPTY(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1470		DP_ERR(p_hwfn, "Ep list is empty\n");
1471		goto out;
1472	}
1473
1474	ep = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1475				   struct ecore_iwarp_ep,
1476				   list_entry);
1477
1478	/* in some cases we could have failed allocating a tcp cid when added
1479	 * from accept / failure... retry now..this is not the common case.
1480	 */
1481	if (ep->tcp_cid == ECORE_IWARP_INVALID_TCP_CID) {
1482		rc = ecore_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1483		/* if we fail we could look for another entry with a valid
1484		 * tcp_cid, but since we don't expect to reach this anyway
1485		 * it's not worth the handling
1486		 */
1487		if (rc) {
1488			ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1489			ep = OSAL_NULL;
1490			goto out;
1491		}
1492	}
1493
1494	OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
1495			       &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1496
1497out:
1498	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1499	return ep;
1500}
1501
1502/* takes into account timer scan ~20 ms and interrupt/dpc overhead */
1503#define ECORE_IWARP_MAX_CID_CLEAN_TIME  100
1504/* Technically we shouldn't reach this count with 100 ms iteration sleep */
1505#define ECORE_IWARP_MAX_NO_PROGRESS_CNT 5
1506
1507/* This function waits for all the bits of a bmap to be cleared, as long as
1508 * there is progress ( i.e. the number of bits left to be cleared decreases )
1509 * the function continues.
1510 */
1511static enum _ecore_status_t
1512ecore_iwarp_wait_cid_map_cleared(struct ecore_hwfn *p_hwfn,
1513				 struct ecore_bmap *bmap)
1514{
1515	int prev_weight = 0;
1516	int wait_count = 0;
1517	int weight = 0;
1518
1519	weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
1520	prev_weight = weight;
1521
1522	while (weight) {
1523		OSAL_MSLEEP(ECORE_IWARP_MAX_CID_CLEAN_TIME);
1524
1525		weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
1526
1527		if (prev_weight == weight) {
1528			wait_count++;
1529		} else {
1530			prev_weight = weight;
1531			wait_count = 0;
1532		}
1533
1534		if (wait_count > ECORE_IWARP_MAX_NO_PROGRESS_CNT) {
1535			DP_NOTICE(p_hwfn, false,
1536				  "%s bitmap wait timed out (%d cids pending)\n",
1537				  bmap->name, weight);
1538			return ECORE_TIMEOUT;
1539		}
1540	}
1541	return ECORE_SUCCESS;
1542}
1543
1544static enum _ecore_status_t
1545ecore_iwarp_wait_for_all_cids(struct ecore_hwfn *p_hwfn)
1546{
1547	enum _ecore_status_t rc;
1548	int i;
1549
1550	rc = ecore_iwarp_wait_cid_map_cleared(
1551		p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map);
1552	if (rc)
1553		return rc;
1554
1555	/* Now free the tcp cids from the main cid map */
1556	for (i = 0; i < ECORE_IWARP_PREALLOC_CNT; i++) {
1557		ecore_bmap_release_id(p_hwfn,
1558				      &p_hwfn->p_rdma_info->cid_map,
1559				      i);
1560	}
1561
1562	/* Now wait for all cids to be completed */
1563	rc = ecore_iwarp_wait_cid_map_cleared(
1564		p_hwfn, &p_hwfn->p_rdma_info->cid_map);
1565
1566	return rc;
1567}
1568
1569static void
1570ecore_iwarp_free_prealloc_ep(struct ecore_hwfn *p_hwfn)
1571{
1572	struct ecore_iwarp_ep *ep;
1573	u32 cid;
1574
1575	while (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1576		OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1577
1578		ep = OSAL_LIST_FIRST_ENTRY(
1579			&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1580			struct ecore_iwarp_ep, list_entry);
1581
1582		if (ep == OSAL_NULL) {
1583			OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1584			break;
1585		}
1586
1587#ifdef _NTDDK_
1588#pragma warning(suppress : 6011)
1589#endif
1590		OSAL_LIST_REMOVE_ENTRY(
1591			&ep->list_entry,
1592			&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1593
1594		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1595
1596		if (ep->tcp_cid != ECORE_IWARP_INVALID_TCP_CID) {
1597			cid = ep->tcp_cid - ecore_cxt_get_proto_cid_start(
1598				p_hwfn, p_hwfn->p_rdma_info->proto);
1599
1600			OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1601
1602			ecore_bmap_release_id(p_hwfn,
1603					      &p_hwfn->p_rdma_info->tcp_cid_map,
1604					      cid);
1605
1606			OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1607		}
1608
1609		ecore_iwarp_destroy_ep(p_hwfn, ep, false);
1610	}
1611}
1612
1613static enum _ecore_status_t
1614ecore_iwarp_prealloc_ep(struct ecore_hwfn *p_hwfn, bool init)
1615{
1616	struct ecore_iwarp_ep *ep;
1617	int rc = ECORE_SUCCESS;
1618	u32 cid;
1619	int count;
1620	int i;
1621
1622	if (init)
1623		count = ECORE_IWARP_PREALLOC_CNT;
1624	else
1625		count = 1;
1626
1627	for (i = 0; i < count; i++) {
1628		rc = ecore_iwarp_create_ep(p_hwfn, &ep);
1629		if (rc != ECORE_SUCCESS)
1630			return rc;
1631
1632		/* During initialization we allocate from the main pool,
1633		 * afterwards we allocate only from the tcp_cid.
1634		 */
1635		if (init) {
1636			rc = ecore_iwarp_alloc_cid(p_hwfn, &cid);
1637			if (rc != ECORE_SUCCESS)
1638				goto err;
1639			ecore_iwarp_set_tcp_cid(p_hwfn, cid);
1640		} else {
1641			/* We don't care about the return code, it's ok if
1642			 * tcp_cid remains invalid...in this case we'll
1643			 * defer allocation
1644			 */
1645			ecore_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1646		}
1647
1648		ep->tcp_cid = cid;
1649
1650		OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1651		OSAL_LIST_PUSH_TAIL(&ep->list_entry,
1652				    &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1653		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1654	}
1655
1656	return rc;
1657
1658err:
1659	ecore_iwarp_destroy_ep(p_hwfn, ep, false);
1660
1661	return rc;
1662}
1663
1664enum _ecore_status_t
1665ecore_iwarp_alloc(struct ecore_hwfn *p_hwfn)
1666{
1667	enum _ecore_status_t rc;
1668
1669#ifdef CONFIG_ECORE_LOCK_ALLOC
1670	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->p_rdma_info->iwarp.iw_lock);
1671	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->p_rdma_info->iwarp.qp_lock);
1672#endif
1673	OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1674	OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.qp_lock);
1675
1676	/* Allocate bitmap for tcp cid. These are used by passive side
1677	 * to ensure it can allocate a tcp cid during dpc that was
1678	 * pre-acquired and doesn't require dynamic allocation of ilt
1679	 */
1680	rc = ecore_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1681				   ECORE_IWARP_PREALLOC_CNT,
1682				   "TCP_CID");
1683	if (rc != ECORE_SUCCESS) {
1684		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1685			   "Failed to allocate tcp cid, rc = %d\n",
1686			   rc);
1687		return rc;
1688	}
1689
1690	OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1691//DAVIDS	OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1692	rc = ecore_iwarp_prealloc_ep(p_hwfn, true);
1693	if (rc != ECORE_SUCCESS) {
1694		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1695			   "ecore_iwarp_prealloc_ep failed, rc = %d\n",
1696			   rc);
1697		return rc;
1698	}
1699	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1700			   "ecore_iwarp_prealloc_ep success, rc = %d\n",
1701			   rc);
1702
1703	return ecore_ooo_alloc(p_hwfn);
1704}
1705
1706void
1707ecore_iwarp_resc_free(struct ecore_hwfn *p_hwfn)
1708{
1709	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1710
1711#ifdef CONFIG_ECORE_LOCK_ALLOC
1712	OSAL_SPIN_LOCK_DEALLOC(iwarp_info->iw_lock);
1713	OSAL_SPIN_LOCK_DEALLOC(iwarp_info->qp_lock);
1714#endif
1715	ecore_ooo_free(p_hwfn);
1716	if (iwarp_info->partial_fpdus)
1717		OSAL_FREE(p_hwfn->p_dev, iwarp_info->partial_fpdus);
1718	if (iwarp_info->mpa_bufs)
1719		OSAL_FREE(p_hwfn->p_dev, iwarp_info->mpa_bufs);
1720	if (iwarp_info->mpa_intermediate_buf)
1721		OSAL_FREE(p_hwfn->p_dev, iwarp_info->mpa_intermediate_buf);
1722
1723	ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1724}
1725
1726enum _ecore_status_t
1727ecore_iwarp_accept(void *rdma_cxt,
1728		   struct ecore_iwarp_accept_in *iparams)
1729{
1730	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1731	struct ecore_iwarp_ep *ep;
1732	u8 mpa_data_size = 0;
1733	enum _ecore_status_t rc;
1734
1735	ep = (struct ecore_iwarp_ep *)iparams->ep_context;
1736	if (!ep) {
1737		DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
1738		return ECORE_INVAL;
1739	}
1740
1741	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1742		   iparams->qp->icid, ep->tcp_cid);
1743
1744	if ((iparams->ord > ECORE_IWARP_ORD_DEFAULT) ||
1745	    (iparams->ird > ECORE_IWARP_IRD_DEFAULT)) {
1746		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1747			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1748			   iparams->qp->icid, ep->tcp_cid,
1749			   iparams->ord, iparams->ord);
1750		return ECORE_INVAL;
1751	}
1752
1753	/* We could reach qp->ep != OSAL NULL if we do accept on the same qp */
1754	if (iparams->qp->ep == OSAL_NULL) {
1755		/* We need to add a replacement for the ep to the free list */
1756		ecore_iwarp_prealloc_ep(p_hwfn, false);
1757	} else {
1758		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1759			   "Note re-use of QP for different connect\n");
1760		/* Return the old ep to the free_pool */
1761		ecore_iwarp_return_ep(p_hwfn, iparams->qp->ep);
1762	}
1763
1764	ecore_iwarp_move_to_ep_list(p_hwfn,
1765				    &p_hwfn->p_rdma_info->iwarp.ep_list,
1766				    ep);
1767	ep->listener = OSAL_NULL;
1768	ep->cb_context = iparams->cb_context;
1769	ep->qp = iparams->qp;
1770	ep->qp->ep = ep;
1771
1772	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1773		/* Negotiate ord/ird: if upperlayer requested ord larger than
1774		 * ird advertised by remote, we need to decrease our ord
1775		 * to match remote ord
1776		 */
1777		if (iparams->ord > ep->cm_info.ird) {
1778			iparams->ord = ep->cm_info.ird;
1779		}
1780
1781		/* For chelsio compatability, if rtr_zero read is requested
1782		 * we can't set ird to zero
1783		 */
1784		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1785		    (iparams->ird == 0))
1786			iparams->ird = 1;
1787	}
1788
1789	/* Update cm_info ord/ird to be negotiated values */
1790	ep->cm_info.ord = iparams->ord;
1791	ep->cm_info.ird = iparams->ird;
1792
1793	ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1794
1795	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1796	ep->cm_info.private_data_len =
1797		iparams->private_data_len + mpa_data_size;
1798
1799	OSAL_MEMCPY((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1800		    iparams->private_data,
1801		    iparams->private_data_len);
1802
1803	if (ep->state == ECORE_IWARP_EP_CLOSED) {
1804		DP_NOTICE(p_hwfn, false,
1805			  "(0x%x) Accept called on EP in CLOSED state\n",
1806			  ep->tcp_cid);
1807		ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1808		ecore_iwarp_return_ep(p_hwfn, ep);
1809		return ECORE_CONN_RESET;
1810	}
1811
1812	rc = ecore_iwarp_mpa_offload(p_hwfn, ep);
1813	if (rc) {
1814		ecore_iwarp_modify_qp(p_hwfn,
1815				      iparams->qp,
1816				      ECORE_IWARP_QP_STATE_ERROR,
1817				      1);
1818	}
1819
1820	return rc;
1821}
1822
1823enum _ecore_status_t
1824ecore_iwarp_reject(void *rdma_cxt,
1825		   struct ecore_iwarp_reject_in *iparams)
1826{
1827	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1828	struct ecore_iwarp_ep *ep;
1829	u8 mpa_data_size = 0;
1830	enum _ecore_status_t rc;
1831
1832	ep = (struct ecore_iwarp_ep *)iparams->ep_context;
1833	if (!ep) {
1834		DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n");
1835		return ECORE_INVAL;
1836	}
1837
1838	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1839
1840	ep->cb_context = iparams->cb_context;
1841	ep->qp = OSAL_NULL;
1842
1843	ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1844
1845	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1846	ep->cm_info.private_data_len =
1847		iparams->private_data_len + mpa_data_size;
1848
1849	OSAL_MEMCPY((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1850		    iparams->private_data,
1851		    iparams->private_data_len);
1852
1853	if (ep->state == ECORE_IWARP_EP_CLOSED) {
1854		DP_NOTICE(p_hwfn, false,
1855			  "(0x%x) Reject called on EP in CLOSED state\n",
1856			  ep->tcp_cid);
1857		ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1858		ecore_iwarp_return_ep(p_hwfn, ep);
1859		return ECORE_CONN_RESET;
1860	}
1861
1862	rc = ecore_iwarp_mpa_offload(p_hwfn, ep);
1863	return rc;
1864}
1865
1866static void
1867ecore_iwarp_print_cm_info(struct ecore_hwfn *p_hwfn,
1868			  struct ecore_iwarp_cm_info *cm_info)
1869{
1870	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ip_version = %d\n",
1871		   cm_info->ip_version);
1872	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_ip %x.%x.%x.%x\n",
1873		   cm_info->remote_ip[0],
1874		   cm_info->remote_ip[1],
1875		   cm_info->remote_ip[2],
1876		   cm_info->remote_ip[3]);
1877	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_ip %x.%x.%x.%x\n",
1878		   cm_info->local_ip[0],
1879		   cm_info->local_ip[1],
1880		   cm_info->local_ip[2],
1881		   cm_info->local_ip[3]);
1882	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_port = %x\n",
1883		   cm_info->remote_port);
1884	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_port = %x\n",
1885		   cm_info->local_port);
1886	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "vlan = %x\n",
1887		   cm_info->vlan);
1888	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "private_data_len = %x\n",
1889		   cm_info->private_data_len);
1890	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ord = %d\n",
1891		   cm_info->ord);
1892	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ird = %d\n",
1893		   cm_info->ird);
1894}
1895
1896static int
1897ecore_iwarp_ll2_post_rx(struct ecore_hwfn *p_hwfn,
1898			struct ecore_iwarp_ll2_buff *buf,
1899			u8 handle)
1900{
1901	enum _ecore_status_t rc;
1902
1903	rc = ecore_ll2_post_rx_buffer(
1904		p_hwfn,
1905		handle,
1906		buf->data_phys_addr,
1907		(u16)buf->buff_size,
1908		buf, 1);
1909
1910	if (rc) {
1911		DP_NOTICE(p_hwfn, false,
1912			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1913			  rc, handle);
1914		OSAL_DMA_FREE_COHERENT(
1915			p_hwfn->p_dev,
1916			buf->data,
1917			buf->data_phys_addr,
1918			buf->buff_size);
1919		OSAL_FREE(p_hwfn->p_dev, buf);
1920	}
1921
1922	return rc;
1923}
1924
1925static bool
1926ecore_iwarp_ep_exists(struct ecore_hwfn *p_hwfn,
1927		      struct ecore_iwarp_listener *listener,
1928		      struct ecore_iwarp_cm_info *cm_info)
1929{
1930	struct ecore_iwarp_ep *ep = OSAL_NULL;
1931	bool found = false;
1932
1933	OSAL_SPIN_LOCK(&listener->lock);
1934	OSAL_LIST_FOR_EACH_ENTRY(ep, &listener->ep_list,
1935				 list_entry, struct ecore_iwarp_ep) {
1936		if ((ep->cm_info.local_port == cm_info->local_port) &&
1937		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1938		    (ep->cm_info.vlan == cm_info->vlan) &&
1939		    !OSAL_MEMCMP(&(ep->cm_info.local_ip), cm_info->local_ip,
1940				 sizeof(cm_info->local_ip)) &&
1941		    !OSAL_MEMCMP(&(ep->cm_info.remote_ip), cm_info->remote_ip,
1942				 sizeof(cm_info->remote_ip))) {
1943				found = true;
1944				break;
1945		}
1946	}
1947
1948	OSAL_SPIN_UNLOCK(&listener->lock);
1949
1950	if (found) {
1951		DP_NOTICE(p_hwfn, false, "SYN received on active connection - dropping\n");
1952		ecore_iwarp_print_cm_info(p_hwfn, cm_info);
1953
1954		return true;
1955	}
1956
1957	return false;
1958}
1959
1960static struct ecore_iwarp_listener *
1961ecore_iwarp_get_listener(struct ecore_hwfn *p_hwfn,
1962			 struct ecore_iwarp_cm_info *cm_info)
1963{
1964	struct ecore_iwarp_listener *listener = OSAL_NULL;
1965	static const u32 ip_zero[4] = {0, 0, 0, 0};
1966	bool found = false;
1967
1968	ecore_iwarp_print_cm_info(p_hwfn, cm_info);
1969
1970	OSAL_LIST_FOR_EACH_ENTRY(listener,
1971				 &p_hwfn->p_rdma_info->iwarp.listen_list,
1972				 list_entry, struct ecore_iwarp_listener) {
1973		if (listener->port == cm_info->local_port) {
1974			/* Any IP (i.e. 0.0.0.0 ) will be treated as any vlan */
1975			if (!OSAL_MEMCMP(listener->ip_addr,
1976					 ip_zero,
1977					 sizeof(ip_zero))) {
1978				found = true;
1979				break;
1980			}
1981
1982			/* If not any IP -> check vlan as well */
1983			if (!OSAL_MEMCMP(listener->ip_addr,
1984					 cm_info->local_ip,
1985					 sizeof(cm_info->local_ip)) &&
1986
1987			     (listener->vlan == cm_info->vlan)) {
1988				found = true;
1989				break;
1990			}
1991		}
1992	}
1993
1994	if (found && listener->state == ECORE_IWARP_LISTENER_STATE_ACTIVE) {
1995		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener found = %p\n",
1996			   listener);
1997		return listener;
1998	}
1999	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener not found\n");
2000	return OSAL_NULL;
2001}
2002
2003static enum _ecore_status_t
2004ecore_iwarp_parse_rx_pkt(struct ecore_hwfn *p_hwfn,
2005			 struct ecore_iwarp_cm_info *cm_info,
2006			 void *buf,
2007			 u8 *remote_mac_addr,
2008			 u8 *local_mac_addr,
2009			 int *payload_len,
2010			 int *tcp_start_offset)
2011{
2012	struct ecore_vlan_ethhdr *vethh;
2013	struct ecore_ethhdr *ethh;
2014	struct ecore_iphdr *iph;
2015	struct ecore_ipv6hdr *ip6h;
2016	struct ecore_tcphdr *tcph;
2017	bool vlan_valid = false;
2018	int eth_hlen, ip_hlen;
2019	u16 eth_type;
2020	int i;
2021
2022	ethh = (struct ecore_ethhdr *)buf;
2023	eth_type = ntohs(ethh->h_proto);
2024	if (eth_type == ETH_P_8021Q) {
2025		vlan_valid = true;
2026		vethh = (struct ecore_vlan_ethhdr *)ethh;
2027		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
2028		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
2029	}
2030
2031	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
2032
2033	OSAL_MEMCPY(remote_mac_addr,
2034		    ethh->h_source,
2035		    ETH_ALEN);
2036
2037	OSAL_MEMCPY(local_mac_addr,
2038		    ethh->h_dest,
2039		    ETH_ALEN);
2040
2041	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_type =%d Source mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2042		   eth_type, ethh->h_source[0], ethh->h_source[1],
2043		   ethh->h_source[2], ethh->h_source[3],
2044		   ethh->h_source[4], ethh->h_source[5]);
2045
2046	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_hlen=%d destination mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2047		   eth_hlen, ethh->h_dest[0], ethh->h_dest[1],
2048		   ethh->h_dest[2], ethh->h_dest[3],
2049		   ethh->h_dest[4], ethh->h_dest[5]);
2050
2051	iph = (struct ecore_iphdr *)((u8 *)(ethh) + eth_hlen);
2052
2053	if (eth_type == ETH_P_IP) {
2054		if (iph->protocol != IPPROTO_TCP) {
2055			DP_NOTICE(p_hwfn, false,
2056				  "Unexpected ip protocol on ll2 %x\n",
2057				  iph->protocol);
2058			return ECORE_INVAL;
2059		}
2060
2061		cm_info->local_ip[0] = ntohl(iph->daddr);
2062		cm_info->remote_ip[0] = ntohl(iph->saddr);
2063		cm_info->ip_version = (enum ecore_tcp_ip_version)TCP_IPV4;
2064
2065		ip_hlen = (iph->ihl)*sizeof(u32);
2066		*payload_len = ntohs(iph->tot_len) - ip_hlen;
2067
2068	} else if (eth_type == ETH_P_IPV6) {
2069		ip6h = (struct ecore_ipv6hdr *)iph;
2070
2071		if (ip6h->nexthdr != IPPROTO_TCP) {
2072			DP_NOTICE(p_hwfn, false,
2073				  "Unexpected ip protocol on ll2 %x\n",
2074				  iph->protocol);
2075			return ECORE_INVAL;
2076		}
2077
2078		for (i = 0; i < 4; i++) {
2079			cm_info->local_ip[i] =
2080				ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
2081			cm_info->remote_ip[i] =
2082				ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
2083		}
2084		cm_info->ip_version = (enum ecore_tcp_ip_version)TCP_IPV6;
2085
2086		ip_hlen = sizeof(*ip6h);
2087		*payload_len = ntohs(ip6h->payload_len);
2088	} else {
2089		DP_NOTICE(p_hwfn, false,
2090			  "Unexpected ethertype on ll2 %x\n", eth_type);
2091		return ECORE_INVAL;
2092	}
2093
2094	tcph = (struct ecore_tcphdr *)((u8 *)iph + ip_hlen);
2095
2096	if (!tcph->syn) {
2097		DP_NOTICE(p_hwfn, false,
2098			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
2099			  iph->ihl, tcph->source, tcph->dest);
2100		return ECORE_INVAL;
2101	}
2102
2103	cm_info->local_port = ntohs(tcph->dest);
2104	cm_info->remote_port = ntohs(tcph->source);
2105
2106	ecore_iwarp_print_cm_info(p_hwfn, cm_info);
2107
2108	*tcp_start_offset = eth_hlen + ip_hlen;
2109
2110	return ECORE_SUCCESS;
2111}
2112
2113static struct ecore_iwarp_fpdu *
2114ecore_iwarp_get_curr_fpdu(struct ecore_hwfn *p_hwfn, u16 cid)
2115{
2116	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2117	struct ecore_iwarp_fpdu *partial_fpdu;
2118	u32 idx = cid - ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
2119
2120	if (idx >= iwarp_info->max_num_partial_fpdus) {
2121		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
2122		       iwarp_info->max_num_partial_fpdus);
2123		return OSAL_NULL;
2124	}
2125
2126	partial_fpdu = &iwarp_info->partial_fpdus[idx];
2127
2128	return partial_fpdu;
2129}
2130
2131enum ecore_iwarp_mpa_pkt_type {
2132	ECORE_IWARP_MPA_PKT_PACKED,
2133	ECORE_IWARP_MPA_PKT_PARTIAL,
2134	ECORE_IWARP_MPA_PKT_UNALIGNED
2135};
2136
2137#define ECORE_IWARP_INVALID_FPDU_LENGTH 0xffff
2138#define ECORE_IWARP_MPA_FPDU_LENGTH_SIZE (2)
2139#define ECORE_IWARP_MPA_CRC32_DIGEST_SIZE (4)
2140
2141/* Pad to multiple of 4 */
2142#define ECORE_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) (((data_len) + 3) & ~3)
2143
2144#define ECORE_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \
2145	(ECORE_IWARP_PDU_DATA_LEN_WITH_PAD(_mpa_len + \
2146					   ECORE_IWARP_MPA_FPDU_LENGTH_SIZE) + \
2147					   ECORE_IWARP_MPA_CRC32_DIGEST_SIZE)
2148
2149/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
2150#define ECORE_IWARP_MAX_BDS_PER_FPDU 3
2151
2152char *pkt_type_str[] = {
2153	"ECORE_IWARP_MPA_PKT_PACKED",
2154	"ECORE_IWARP_MPA_PKT_PARTIAL",
2155	"ECORE_IWARP_MPA_PKT_UNALIGNED"
2156};
2157
2158static enum _ecore_status_t
2159ecore_iwarp_recycle_pkt(struct ecore_hwfn *p_hwfn,
2160			struct ecore_iwarp_fpdu *fpdu,
2161			struct ecore_iwarp_ll2_buff *buf);
2162
2163static enum ecore_iwarp_mpa_pkt_type
2164ecore_iwarp_mpa_classify(struct ecore_hwfn *p_hwfn,
2165			 struct ecore_iwarp_fpdu *fpdu,
2166			 u16 tcp_payload_len,
2167			 u8 *mpa_data)
2168
2169{
2170	enum ecore_iwarp_mpa_pkt_type pkt_type;
2171	u16 mpa_len;
2172
2173	if (fpdu->incomplete_bytes) {
2174		pkt_type = ECORE_IWARP_MPA_PKT_UNALIGNED;
2175		goto out;
2176	}
2177
2178	/* special case of one byte remaining... */
2179	if (tcp_payload_len == 1) {
2180		/* lower byte will be read next packet */
2181		fpdu->fpdu_length = *mpa_data << 8;
2182		pkt_type = ECORE_IWARP_MPA_PKT_PARTIAL;
2183		goto out;
2184	}
2185
2186	mpa_len = ntohs(*((u16 *)(mpa_data)));
2187	fpdu->fpdu_length = ECORE_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
2188
2189	if (fpdu->fpdu_length <= tcp_payload_len)
2190		pkt_type = ECORE_IWARP_MPA_PKT_PACKED;
2191	else
2192		pkt_type = ECORE_IWARP_MPA_PKT_PARTIAL;
2193
2194out:
2195	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2196		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
2197		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
2198
2199	return pkt_type;
2200}
2201
2202static void
2203ecore_iwarp_init_fpdu(struct ecore_iwarp_ll2_buff *buf,
2204		      struct ecore_iwarp_fpdu *fpdu,
2205		      struct unaligned_opaque_data *pkt_data,
2206		      u16 tcp_payload_size, u8 placement_offset)
2207{
2208	fpdu->mpa_buf = buf;
2209	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
2210	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
2211
2212	fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
2213	fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
2214
2215	if (tcp_payload_size == 1)
2216		fpdu->incomplete_bytes = ECORE_IWARP_INVALID_FPDU_LENGTH;
2217	else if (tcp_payload_size < fpdu->fpdu_length)
2218		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
2219	else
2220		fpdu->incomplete_bytes = 0; /* complete fpdu */
2221
2222	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
2223}
2224
2225static enum _ecore_status_t
2226ecore_iwarp_copy_fpdu(struct ecore_hwfn *p_hwfn,
2227		      struct ecore_iwarp_fpdu *fpdu,
2228		      struct unaligned_opaque_data *pkt_data,
2229		      struct ecore_iwarp_ll2_buff *buf,
2230		      u16 tcp_payload_size)
2231
2232{
2233	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
2234	enum _ecore_status_t rc;
2235
2236	/* need to copy the data from the partial packet stored in fpdu
2237	 * to the new buf, for this we also need to move the data currently
2238	 * placed on the buf. The assumption is that the buffer is big enough
2239	 * since fpdu_length <= mss, we use an intermediate buffer since
2240	 * we may need to copy the new data to an overlapping location
2241	 */
2242	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
2243		DP_ERR(p_hwfn,
2244		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
2245		       buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
2246		       fpdu->incomplete_bytes);
2247		return ECORE_INVAL;
2248	}
2249
2250	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2251		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
2252		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
2253		   (u8 *)(buf->data) + pkt_data->first_mpa_offset,
2254		   tcp_payload_size);
2255
2256	OSAL_MEMCPY(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
2257	OSAL_MEMCPY(tmp_buf + fpdu->mpa_frag_len,
2258		    (u8 *)(buf->data) + pkt_data->first_mpa_offset,
2259		    tcp_payload_size);
2260
2261	rc = ecore_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
2262	if (rc)
2263		return rc;
2264
2265	/* If we managed to post the buffer copy the data to the new buffer
2266	 * o/w this will occur in the next round...
2267	 */
2268	OSAL_MEMCPY((u8 *)(buf->data), tmp_buf,
2269		    fpdu->mpa_frag_len + tcp_payload_size);
2270
2271	fpdu->mpa_buf = buf;
2272	/* fpdu->pkt_hdr remains as is */
2273	/* fpdu->mpa_frag is overriden with new buf */
2274	fpdu->mpa_frag = buf->data_phys_addr;
2275	fpdu->mpa_frag_virt = buf->data;
2276	fpdu->mpa_frag_len += tcp_payload_size;
2277
2278	fpdu->incomplete_bytes -= tcp_payload_size;
2279
2280	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2281		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
2282		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
2283		   fpdu->incomplete_bytes);
2284
2285	return 0;
2286}
2287
2288static void
2289ecore_iwarp_update_fpdu_length(struct ecore_hwfn *p_hwfn,
2290			       struct ecore_iwarp_fpdu *fpdu,
2291			       u8 *mpa_data)
2292{
2293	u16 mpa_len;
2294
2295	/* Update incomplete packets if needed */
2296	if (fpdu->incomplete_bytes == ECORE_IWARP_INVALID_FPDU_LENGTH) {
2297		mpa_len = fpdu->fpdu_length | *mpa_data;
2298		fpdu->fpdu_length = ECORE_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
2299		fpdu->mpa_frag_len = fpdu->fpdu_length;
2300		/* one byte of hdr */
2301		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
2302		DP_VERBOSE(p_hwfn,
2303			   ECORE_MSG_RDMA,
2304			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
2305			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
2306	}
2307}
2308
2309#define ECORE_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
2310	(GET_FIELD(_curr_pkt->flags, \
2311		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
2312
2313/* This function is used to recycle a buffer using the ll2 drop option. It
2314 * uses the mechanism to ensure that all buffers posted to tx before this one
2315 * were completed. The buffer sent here will be sent as a cookie in the tx
2316 * completion function and can then be reposted to rx chain when done. The flow
2317 * that requires this is the flow where a FPDU splits over more than 3 tcp
2318 * segments. In this case the driver needs to re-post a rx buffer instead of
2319 * the one received, but driver can't simply repost a buffer it copied from
2320 * as there is a case where the buffer was originally a packed FPDU, and is
2321 * partially posted to FW. Driver needs to ensure FW is done with it.
2322 */
2323static enum _ecore_status_t
2324ecore_iwarp_recycle_pkt(struct ecore_hwfn *p_hwfn,
2325			struct ecore_iwarp_fpdu *fpdu,
2326			struct ecore_iwarp_ll2_buff *buf)
2327{
2328	struct ecore_ll2_tx_pkt_info tx_pkt;
2329	enum _ecore_status_t rc;
2330	u8 ll2_handle;
2331
2332	OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt));
2333	tx_pkt.num_of_bds = 1;
2334	tx_pkt.tx_dest = ECORE_LL2_TX_DEST_DROP;
2335	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2336	tx_pkt.first_frag = fpdu->pkt_hdr;
2337	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2338	buf->piggy_buf = OSAL_NULL;
2339	tx_pkt.cookie = buf;
2340
2341	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2342
2343	rc = ecore_ll2_prepare_tx_packet(p_hwfn,
2344					 ll2_handle,
2345					 &tx_pkt, true);
2346
2347	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2348		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2349		   (long unsigned int)tx_pkt.first_frag,
2350		   tx_pkt.first_frag_len, buf, rc);
2351
2352	if (rc)
2353		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2354			   "Can't drop packet rc=%d\n", rc);
2355
2356	return rc;
2357}
2358
2359static enum _ecore_status_t
2360ecore_iwarp_win_right_edge(struct ecore_hwfn *p_hwfn,
2361			   struct ecore_iwarp_fpdu *fpdu)
2362{
2363	struct ecore_ll2_tx_pkt_info tx_pkt;
2364	enum _ecore_status_t rc;
2365	u8 ll2_handle;
2366
2367	OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt));
2368	tx_pkt.num_of_bds = 1;
2369	tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB;
2370	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2371
2372	tx_pkt.first_frag = fpdu->pkt_hdr;
2373	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2374	tx_pkt.enable_ip_cksum = true;
2375	tx_pkt.enable_l4_cksum = true;
2376	tx_pkt.calc_ip_len = true;
2377	/* vlan overload with enum iwarp_ll2_tx_queues */
2378	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2379
2380	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2381
2382	rc = ecore_ll2_prepare_tx_packet(p_hwfn,
2383					 ll2_handle,
2384					 &tx_pkt, true);
2385
2386	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2387		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2388		   tx_pkt.num_of_bds, (long unsigned int)tx_pkt.first_frag,
2389		   tx_pkt.first_frag_len, rc);
2390
2391	if (rc)
2392		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2393			   "Can't send right edge rc=%d\n", rc);
2394
2395	return rc;
2396}
2397
2398static enum _ecore_status_t
2399ecore_iwarp_send_fpdu(struct ecore_hwfn *p_hwfn,
2400		      struct ecore_iwarp_fpdu *fpdu,
2401		      struct unaligned_opaque_data *curr_pkt,
2402		      struct ecore_iwarp_ll2_buff *buf,
2403		      u16 tcp_payload_size,
2404		      enum ecore_iwarp_mpa_pkt_type pkt_type)
2405{
2406	struct ecore_ll2_tx_pkt_info tx_pkt;
2407	enum _ecore_status_t rc;
2408	u8 ll2_handle;
2409
2410	OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt));
2411
2412	tx_pkt.num_of_bds = (pkt_type == ECORE_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2413	tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB;
2414	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2415
2416	/* Send the mpa_buf only with the last fpdu (in case of packed) */
2417	if ((pkt_type == ECORE_IWARP_MPA_PKT_UNALIGNED) ||
2418	    (tcp_payload_size <= fpdu->fpdu_length))
2419		tx_pkt.cookie = fpdu->mpa_buf;
2420
2421	tx_pkt.first_frag = fpdu->pkt_hdr;
2422	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2423	tx_pkt.enable_ip_cksum = true;
2424	tx_pkt.enable_l4_cksum = true;
2425	tx_pkt.calc_ip_len = true;
2426	/* vlan overload with enum iwarp_ll2_tx_queues */
2427	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2428
2429	/* special case of unaligned packet and not packed, need to send
2430	 * both buffers as cookie to release.
2431	 */
2432	if (tcp_payload_size == fpdu->incomplete_bytes) {
2433		fpdu->mpa_buf->piggy_buf = buf;
2434	}
2435
2436	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2437
2438	rc = ecore_ll2_prepare_tx_packet(p_hwfn,
2439					 ll2_handle,
2440					 &tx_pkt, true);
2441	if (rc)
2442		goto err;
2443
2444	rc = ecore_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2445						 fpdu->mpa_frag,
2446						 fpdu->mpa_frag_len);
2447	if (rc)
2448		goto err;
2449
2450	if (fpdu->incomplete_bytes) {
2451		rc = ecore_ll2_set_fragment_of_tx_packet(
2452			p_hwfn, ll2_handle,
2453			buf->data_phys_addr + curr_pkt->first_mpa_offset,
2454			fpdu->incomplete_bytes);
2455
2456		if (rc)
2457			goto err;
2458	}
2459
2460err:
2461	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2462		   "MPA_ALIGN: Sent FPDU num_bds=%d [%lx, 0x%x], [0x%lx, 0x%x], [0x%lx, 0x%x] (cookie %p) rc=%d\n",
2463		   tx_pkt.num_of_bds, (long unsigned int)tx_pkt.first_frag,
2464		   tx_pkt.first_frag_len, (long unsigned int)fpdu->mpa_frag,
2465		   fpdu->mpa_frag_len, (long unsigned int)buf->data_phys_addr +
2466		   curr_pkt->first_mpa_offset, fpdu->incomplete_bytes,
2467		   tx_pkt.cookie, rc);
2468
2469	return rc;
2470}
2471
2472static void
2473ecore_iwarp_mpa_get_data(struct ecore_hwfn *p_hwfn,
2474			 struct unaligned_opaque_data *curr_pkt,
2475			 u32 opaque_data0, u32 opaque_data1)
2476{
2477	u64 opaque_data;
2478
2479	opaque_data = HILO_64(opaque_data1, opaque_data0);
2480	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2481
2482	/* fix endianity */
2483	curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2484		OSAL_LE16_TO_CPU(curr_pkt->first_mpa_offset);
2485	curr_pkt->cid = OSAL_LE32_TO_CPU(curr_pkt->cid);
2486
2487	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2488		   "OPAQUE0=0x%x OPAQUE1=0x%x first_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2489		   opaque_data0, opaque_data1, curr_pkt->first_mpa_offset,
2490		   curr_pkt->tcp_payload_offset, curr_pkt->flags,
2491		   curr_pkt->cid);
2492}
2493
2494static void
2495ecore_iwarp_mpa_print_tcp_seq(struct ecore_hwfn *p_hwfn,
2496			      void *buf)
2497{
2498	struct ecore_vlan_ethhdr *vethh;
2499	struct ecore_ethhdr *ethh;
2500	struct ecore_iphdr *iph;
2501	struct ecore_ipv6hdr *ip6h;
2502	struct ecore_tcphdr *tcph;
2503	bool vlan_valid = false;
2504	int eth_hlen, ip_hlen;
2505	u16 eth_type;
2506
2507	if ((p_hwfn->dp_level > ECORE_LEVEL_VERBOSE) ||
2508	    !(p_hwfn->dp_module & ECORE_MSG_RDMA))
2509		return;
2510
2511	ethh = (struct ecore_ethhdr *)buf;
2512	eth_type = ntohs(ethh->h_proto);
2513	if (eth_type == ETH_P_8021Q) {
2514		vlan_valid = true;
2515		vethh = (struct ecore_vlan_ethhdr *)ethh;
2516		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
2517	}
2518
2519	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
2520
2521	iph = (struct ecore_iphdr *)((u8 *)(ethh) + eth_hlen);
2522
2523	if (eth_type == ETH_P_IP) {
2524		ip_hlen = (iph->ihl)*sizeof(u32);
2525	} else if (eth_type == ETH_P_IPV6) {
2526		ip6h = (struct ecore_ipv6hdr *)iph;
2527		ip_hlen = sizeof(*ip6h);
2528	} else {
2529		DP_ERR(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
2530		return;
2531	}
2532
2533	tcph = (struct ecore_tcphdr *)((u8 *)iph + ip_hlen);
2534
2535	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Processing MPA PKT: tcp_seq=0x%x tcp_ack_seq=0x%x\n",
2536		   ntohl(tcph->seq), ntohl(tcph->ack_seq));
2537
2538	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_type =%d Source mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2539		   eth_type, ethh->h_source[0], ethh->h_source[1],
2540		   ethh->h_source[2], ethh->h_source[3],
2541		   ethh->h_source[4], ethh->h_source[5]);
2542
2543	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_hlen=%d destination mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2544		   eth_hlen, ethh->h_dest[0], ethh->h_dest[1],
2545		   ethh->h_dest[2], ethh->h_dest[3],
2546		   ethh->h_dest[4], ethh->h_dest[5]);
2547
2548	return;
2549}
2550
2551/* This function is called when an unaligned or incomplete MPA packet arrives
2552 * driver needs to align the packet, perhaps using previous data and send
2553 * it down to FW once it is aligned.
2554 */
2555static enum _ecore_status_t
2556ecore_iwarp_process_mpa_pkt(struct ecore_hwfn *p_hwfn,
2557			    struct ecore_iwarp_ll2_mpa_buf *mpa_buf)
2558{
2559	struct ecore_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2560	enum ecore_iwarp_mpa_pkt_type pkt_type;
2561	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2562	struct ecore_iwarp_fpdu *fpdu;
2563	u8 *mpa_data;
2564	enum _ecore_status_t rc = ECORE_SUCCESS;
2565
2566	ecore_iwarp_mpa_print_tcp_seq(
2567		p_hwfn, (u8 *)(buf->data) + mpa_buf->placement_offset);
2568
2569	fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2570	if (!fpdu) {/* something corrupt with cid, post rx back */
2571		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2572		       curr_pkt->cid);
2573		rc = ecore_iwarp_ll2_post_rx(
2574			p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2575
2576		if (rc) { /* not much we can do here except log and free */
2577			DP_ERR(p_hwfn, "Post rx buffer failed\n");
2578
2579			/* we don't expect any failures from rx, not even
2580			 * busy since we allocate #bufs=#descs
2581			 */
2582			rc = ECORE_UNKNOWN_ERROR;
2583		}
2584		return rc;
2585	}
2586
2587	do {
2588		mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2589
2590		pkt_type = ecore_iwarp_mpa_classify(p_hwfn, fpdu,
2591						    mpa_buf->tcp_payload_len,
2592						    mpa_data);
2593
2594		switch (pkt_type) {
2595		case ECORE_IWARP_MPA_PKT_PARTIAL:
2596			ecore_iwarp_init_fpdu(buf, fpdu,
2597					      curr_pkt,
2598					      mpa_buf->tcp_payload_len,
2599					      mpa_buf->placement_offset);
2600
2601			if (!ECORE_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2602				mpa_buf->tcp_payload_len = 0;
2603				break;
2604			}
2605
2606			rc = ecore_iwarp_win_right_edge(p_hwfn, fpdu);
2607
2608			if (rc) {
2609				DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2610					   "Can't send FPDU:reset rc=%d\n", rc);
2611				OSAL_MEM_ZERO(fpdu, sizeof(*fpdu));
2612				break;
2613			}
2614
2615			mpa_buf->tcp_payload_len = 0;
2616			break;
2617		case ECORE_IWARP_MPA_PKT_PACKED:
2618			if (fpdu->fpdu_length == 8) {
2619				DP_ERR(p_hwfn, "SUSPICIOUS fpdu_length = 0x%x: assuming bug...aborting this packet...\n",
2620				       fpdu->fpdu_length);
2621				mpa_buf->tcp_payload_len = 0;
2622				break;
2623			}
2624
2625			ecore_iwarp_init_fpdu(buf, fpdu,
2626					      curr_pkt,
2627					      mpa_buf->tcp_payload_len,
2628					      mpa_buf->placement_offset);
2629
2630			rc = ecore_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2631						   mpa_buf->tcp_payload_len,
2632						   pkt_type);
2633			if (rc) {
2634				DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2635					   "Can't send FPDU:reset rc=%d\n", rc);
2636				OSAL_MEM_ZERO(fpdu, sizeof(*fpdu));
2637				break;
2638			}
2639			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2640			curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2641			break;
2642		case ECORE_IWARP_MPA_PKT_UNALIGNED:
2643			ecore_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2644			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2645				/* special handling of fpdu split over more
2646				 * than 2 segments
2647				 */
2648				if (ECORE_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2649					rc = ecore_iwarp_win_right_edge(p_hwfn,
2650									fpdu);
2651					/* packet will be re-processed later */
2652					if (rc)
2653						return rc;
2654				}
2655
2656				rc = ecore_iwarp_copy_fpdu(
2657					p_hwfn, fpdu, curr_pkt,
2658					buf, mpa_buf->tcp_payload_len);
2659
2660				/* packet will be re-processed later */
2661				if (rc)
2662					return rc;
2663
2664				mpa_buf->tcp_payload_len = 0;
2665
2666				break;
2667			}
2668
2669			rc = ecore_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2670						   mpa_buf->tcp_payload_len,
2671						   pkt_type);
2672			if (rc) {
2673				DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2674					   "Can't send FPDU:delay rc=%d\n", rc);
2675				/* don't reset fpdu -> we need it for next
2676				 * classify
2677				 */
2678				break;
2679			}
2680			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2681			curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2682			/* The framed PDU was sent - no more incomplete bytes */
2683			fpdu->incomplete_bytes = 0;
2684			break;
2685		}
2686
2687	} while (mpa_buf->tcp_payload_len && !rc);
2688
2689	return rc;
2690}
2691
2692static void
2693ecore_iwarp_process_pending_pkts(struct ecore_hwfn *p_hwfn)
2694{
2695	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2696	struct ecore_iwarp_ll2_mpa_buf *mpa_buf = OSAL_NULL;
2697	enum _ecore_status_t rc;
2698
2699	while (!OSAL_LIST_IS_EMPTY(&iwarp_info->mpa_buf_pending_list)) {
2700		mpa_buf = OSAL_LIST_FIRST_ENTRY(
2701			&iwarp_info->mpa_buf_pending_list,
2702			struct ecore_iwarp_ll2_mpa_buf,
2703			list_entry);
2704
2705		rc = ecore_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2706
2707		 /* busy means break and continue processing later, don't
2708		  * remove the buf from the pending list.
2709		  */
2710		if (rc == ECORE_BUSY)
2711			break;
2712
2713#ifdef _NTDDK_
2714#pragma warning(suppress : 6011)
2715#pragma warning(suppress : 28182)
2716#endif
2717		OSAL_LIST_REMOVE_ENTRY(
2718			&mpa_buf->list_entry,
2719			&iwarp_info->mpa_buf_pending_list);
2720
2721		OSAL_LIST_PUSH_TAIL(&mpa_buf->list_entry,
2722				    &iwarp_info->mpa_buf_list);
2723
2724		if (rc) { /* different error, don't continue */
2725			DP_NOTICE(p_hwfn, false, "process pkts failed rc=%d\n",
2726				  rc);
2727			break;
2728		}
2729	}
2730}
2731
2732static void
2733ecore_iwarp_ll2_comp_mpa_pkt(void *cxt,
2734			     struct ecore_ll2_comp_rx_data *data)
2735{
2736	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2737	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2738	struct ecore_iwarp_ll2_mpa_buf *mpa_buf;
2739
2740	iwarp_info->unalign_rx_comp++;
2741
2742	mpa_buf = OSAL_LIST_FIRST_ENTRY(&iwarp_info->mpa_buf_list,
2743					struct ecore_iwarp_ll2_mpa_buf,
2744					list_entry);
2745
2746	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2747		   "LL2 MPA CompRx buf=%p placement_offset=%d, payload_len=0x%x mpa_buf=%p\n",
2748		   data->cookie, data->u.placement_offset,
2749		   data->length.packet_length, mpa_buf);
2750
2751	if (!mpa_buf) {
2752		DP_ERR(p_hwfn, "no free mpa buf. this is a driver bug.\n");
2753		return;
2754	}
2755	OSAL_LIST_REMOVE_ENTRY(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
2756
2757	ecore_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2758				 data->opaque_data_0, data->opaque_data_1);
2759
2760	mpa_buf->tcp_payload_len = data->length.packet_length -
2761		 mpa_buf->data.first_mpa_offset;
2762	mpa_buf->ll2_buf = (struct ecore_iwarp_ll2_buff *)data->cookie;
2763	mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2764	mpa_buf->placement_offset = data->u.placement_offset;
2765
2766	OSAL_LIST_PUSH_TAIL(&mpa_buf->list_entry,
2767			    &iwarp_info->mpa_buf_pending_list);
2768
2769	ecore_iwarp_process_pending_pkts(p_hwfn);
2770}
2771
2772static void
2773ecore_iwarp_ll2_comp_syn_pkt(void *cxt, struct ecore_ll2_comp_rx_data *data)
2774{
2775	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2776	struct ecore_iwarp_ll2_buff *buf =
2777		(struct ecore_iwarp_ll2_buff *)data->cookie;
2778	struct ecore_iwarp_listener *listener;
2779	struct ecore_iwarp_cm_info cm_info;
2780	struct ecore_ll2_tx_pkt_info tx_pkt;
2781	u8 remote_mac_addr[ETH_ALEN];
2782	u8 local_mac_addr[ETH_ALEN];
2783	struct ecore_iwarp_ep *ep;
2784	enum _ecore_status_t rc;
2785	int tcp_start_offset;
2786	u8 ts_hdr_size = 0;
2787	int payload_len;
2788	u32 hdr_size;
2789
2790	OSAL_MEM_ZERO(&cm_info, sizeof(cm_info));
2791
2792	/* Check if packet was received with errors... */
2793	if (data->err_flags != 0) {
2794		DP_NOTICE(p_hwfn, false, "Error received on SYN packet: 0x%x\n",
2795			  data->err_flags);
2796		goto err;
2797	}
2798
2799	if (GET_FIELD(data->parse_flags,
2800		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2801	    GET_FIELD(data->parse_flags,
2802		      PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2803		DP_NOTICE(p_hwfn, false, "Syn packet received with checksum error\n");
2804		goto err;
2805	}
2806
2807	rc = ecore_iwarp_parse_rx_pkt(
2808		p_hwfn, &cm_info, (u8 *)(buf->data) + data->u.placement_offset,
2809		remote_mac_addr, local_mac_addr, &payload_len,
2810		&tcp_start_offset);
2811	if (rc)
2812		goto err;
2813
2814	/* Check if there is a listener for this 4-tuple */
2815	listener = ecore_iwarp_get_listener(p_hwfn, &cm_info);
2816	if (!listener) {
2817		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2818			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2819			   data->parse_flags, data->length.packet_length);
2820
2821		OSAL_MEMSET(&tx_pkt, 0, sizeof(tx_pkt));
2822		tx_pkt.num_of_bds = 1;
2823		tx_pkt.bd_flags = 0;
2824		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2825		tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB;
2826		tx_pkt.first_frag = buf->data_phys_addr +
2827			data->u.placement_offset;
2828		tx_pkt.first_frag_len = data->length.packet_length;
2829		tx_pkt.cookie = buf;
2830
2831		rc = ecore_ll2_prepare_tx_packet(
2832			p_hwfn,
2833			p_hwfn->p_rdma_info->iwarp.ll2_syn_handle,
2834			&tx_pkt, true);
2835
2836		if (rc) {
2837			DP_NOTICE(p_hwfn, false,
2838				  "Can't post SYN back to chip rc=%d\n", rc);
2839			goto err;
2840		}
2841		return;
2842	}
2843
2844	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Received syn on listening port\n");
2845
2846	/* For debugging purpose... */
2847	if (listener->drop)
2848		goto err;
2849
2850	/* There may be an open ep on this connection if this is a syn
2851	 * retrasnmit... need to make sure there isn't...
2852	 */
2853	if (ecore_iwarp_ep_exists(p_hwfn, listener, &cm_info))
2854		goto err;
2855
2856	ep = ecore_iwarp_get_free_ep(p_hwfn);
2857	if (ep == OSAL_NULL)
2858		goto err;
2859
2860	OSAL_SPIN_LOCK(&listener->lock);
2861	OSAL_LIST_PUSH_TAIL(&ep->list_entry, &listener->ep_list);
2862	OSAL_SPIN_UNLOCK(&listener->lock);
2863
2864	OSAL_MEMCPY(ep->remote_mac_addr,
2865		    remote_mac_addr,
2866		    ETH_ALEN);
2867	OSAL_MEMCPY(ep->local_mac_addr,
2868		    local_mac_addr,
2869		    ETH_ALEN);
2870
2871	OSAL_MEMCPY(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2872
2873	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & ECORE_IWARP_TS_EN)
2874		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
2875
2876	hdr_size = ((cm_info.ip_version == ECORE_TCP_IPV4) ? 40 : 60) +
2877		ts_hdr_size;
2878	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2879	ep->mss = OSAL_MIN_T(u16, ECORE_IWARP_MAX_FW_MSS, ep->mss);
2880
2881	ep->listener = listener;
2882	ep->event_cb = listener->event_cb;
2883	ep->cb_context = listener->cb_context;
2884	ep->connect_mode = TCP_CONNECT_PASSIVE;
2885
2886	ep->syn = buf;
2887	ep->syn_ip_payload_length = (u16)payload_len;
2888	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2889		tcp_start_offset;
2890
2891	rc = ecore_iwarp_tcp_offload(p_hwfn, ep);
2892	if (rc != ECORE_SUCCESS) {
2893		ecore_iwarp_return_ep(p_hwfn, ep);
2894		goto err;
2895	}
2896	return;
2897
2898err:
2899	ecore_iwarp_ll2_post_rx(
2900		p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
2901}
2902
2903static void
2904ecore_iwarp_ll2_rel_rx_pkt(void *cxt,
2905			   u8 OSAL_UNUSED connection_handle,
2906			   void *cookie,
2907			   dma_addr_t OSAL_UNUSED rx_buf_addr,
2908			   bool OSAL_UNUSED b_last_packet)
2909{
2910	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2911	struct ecore_iwarp_ll2_buff *buffer =
2912		(struct ecore_iwarp_ll2_buff *)cookie;
2913
2914	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
2915			       buffer->data,
2916			       buffer->data_phys_addr,
2917			       buffer->buff_size);
2918
2919	OSAL_FREE(p_hwfn->p_dev, buffer);
2920}
2921
2922static void
2923ecore_iwarp_ll2_comp_tx_pkt(void *cxt,
2924			    u8 connection_handle,
2925			    void *cookie,
2926			    dma_addr_t OSAL_UNUSED first_frag_addr,
2927			    bool OSAL_UNUSED b_last_fragment,
2928			    bool OSAL_UNUSED b_last_packet)
2929{
2930	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2931	struct ecore_iwarp_ll2_buff *buffer =
2932		(struct ecore_iwarp_ll2_buff *)cookie;
2933	struct ecore_iwarp_ll2_buff *piggy;
2934
2935	if (!buffer) /* can happen in packed mpa unaligned... */
2936		return;
2937
2938	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2939		   "LL2 CompTX buf=%p piggy_buf=%p handle=%d\n",
2940		   buffer, buffer->piggy_buf, connection_handle);
2941
2942	/* we got a tx packet -> this was originally a rx packet... now we
2943	 * can post it back...
2944	 */
2945	piggy = buffer->piggy_buf;
2946	if (piggy) {
2947		buffer->piggy_buf = OSAL_NULL;
2948		ecore_iwarp_ll2_post_rx(p_hwfn, piggy,
2949					connection_handle);
2950	}
2951
2952	ecore_iwarp_ll2_post_rx(p_hwfn, buffer,
2953				connection_handle);
2954
2955	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2956		ecore_iwarp_process_pending_pkts(p_hwfn);
2957
2958	return;
2959}
2960
2961static void
2962ecore_iwarp_ll2_rel_tx_pkt(void *cxt,
2963			   u8 OSAL_UNUSED connection_handle,
2964			   void *cookie,
2965			   dma_addr_t OSAL_UNUSED first_frag_addr,
2966			   bool OSAL_UNUSED b_last_fragment,
2967			   bool OSAL_UNUSED b_last_packet)
2968{
2969	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2970	struct ecore_iwarp_ll2_buff *buffer =
2971		(struct ecore_iwarp_ll2_buff *)cookie;
2972
2973	if (!buffer)
2974		return;
2975
2976	if (buffer->piggy_buf) {
2977		OSAL_DMA_FREE_COHERENT(
2978			p_hwfn->p_dev,
2979			buffer->piggy_buf->data,
2980			buffer->piggy_buf->data_phys_addr,
2981			buffer->piggy_buf->buff_size);
2982
2983		OSAL_FREE(p_hwfn->p_dev, buffer->piggy_buf);
2984	}
2985
2986	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
2987			       buffer->data,
2988			       buffer->data_phys_addr,
2989			       buffer->buff_size);
2990
2991	OSAL_FREE(p_hwfn->p_dev, buffer);
2992	return;
2993}
2994
2995/* Current known slowpath for iwarp ll2 is unalign flush. When this completion
2996 * is received, need to reset the FPDU.
2997 */
2998static void
2999ecore_iwarp_ll2_slowpath(void *cxt,
3000			 u8 OSAL_UNUSED connection_handle,
3001			 u32 opaque_data_0,
3002			 u32 opaque_data_1)
3003{
3004	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
3005	struct unaligned_opaque_data unalign_data;
3006	struct ecore_iwarp_fpdu *fpdu;
3007
3008	ecore_iwarp_mpa_get_data(p_hwfn, &unalign_data,
3009				 opaque_data_0, opaque_data_1);
3010
3011	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "(0x%x) Flush fpdu\n",
3012		   unalign_data.cid);
3013
3014	fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
3015	if (fpdu)
3016		OSAL_MEM_ZERO(fpdu, sizeof(*fpdu));
3017}
3018
3019static int
3020ecore_iwarp_ll2_stop(struct ecore_hwfn *p_hwfn)
3021{
3022	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
3023	int rc = 0;
3024
3025	if (iwarp_info->ll2_syn_handle != ECORE_IWARP_HANDLE_INVAL) {
3026		rc = ecore_ll2_terminate_connection(p_hwfn,
3027						    iwarp_info->ll2_syn_handle);
3028		if (rc)
3029			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
3030
3031		ecore_ll2_release_connection(p_hwfn,
3032					     iwarp_info->ll2_syn_handle);
3033		iwarp_info->ll2_syn_handle = ECORE_IWARP_HANDLE_INVAL;
3034	}
3035
3036	if (iwarp_info->ll2_ooo_handle != ECORE_IWARP_HANDLE_INVAL) {
3037		rc = ecore_ll2_terminate_connection(p_hwfn,
3038						    iwarp_info->ll2_ooo_handle);
3039		if (rc)
3040			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
3041
3042		ecore_ll2_release_connection(p_hwfn,
3043					     iwarp_info->ll2_ooo_handle);
3044		iwarp_info->ll2_ooo_handle = ECORE_IWARP_HANDLE_INVAL;
3045	}
3046
3047	if (iwarp_info->ll2_mpa_handle != ECORE_IWARP_HANDLE_INVAL) {
3048		rc = ecore_ll2_terminate_connection(p_hwfn,
3049						    iwarp_info->ll2_mpa_handle);
3050		if (rc)
3051			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
3052
3053		ecore_ll2_release_connection(p_hwfn,
3054					     iwarp_info->ll2_mpa_handle);
3055		iwarp_info->ll2_mpa_handle = ECORE_IWARP_HANDLE_INVAL;
3056	}
3057
3058	ecore_llh_remove_mac_filter(p_hwfn->p_dev, 0,
3059				    p_hwfn->p_rdma_info->iwarp.mac_addr);
3060
3061	return rc;
3062}
3063
3064static int
3065ecore_iwarp_ll2_alloc_buffers(struct ecore_hwfn *p_hwfn,
3066			      int num_rx_bufs,
3067			      int buff_size,
3068			      u8 ll2_handle)
3069{
3070	struct ecore_iwarp_ll2_buff *buffer;
3071	int rc = 0;
3072	int i;
3073
3074	for (i = 0; i < num_rx_bufs; i++) {
3075		buffer = OSAL_ZALLOC(p_hwfn->p_dev,
3076				     GFP_KERNEL, sizeof(*buffer));
3077		if (!buffer) {
3078			DP_INFO(p_hwfn, "Failed to allocate LL2 buffer desc\n");
3079			break;
3080		}
3081
3082		buffer->data =
3083			OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
3084						&buffer->data_phys_addr,
3085						buff_size);
3086
3087		if (!buffer->data) {
3088			DP_INFO(p_hwfn, "Failed to allocate LL2 buffers\n");
3089			OSAL_FREE(p_hwfn->p_dev, buffer);
3090			rc = ECORE_NOMEM;
3091			break;
3092		}
3093
3094		buffer->buff_size = buff_size;
3095		rc = ecore_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
3096
3097		if (rc)
3098			break; /* buffers will be deallocated by ecore_ll2 */
3099	}
3100	return rc;
3101}
3102
3103#define ECORE_IWARP_CACHE_PADDING(size) \
3104	(((size) + ETH_CACHE_LINE_SIZE - 1) & ~(ETH_CACHE_LINE_SIZE - 1))
3105
3106#define ECORE_IWARP_MAX_BUF_SIZE(mtu) \
3107	ECORE_IWARP_CACHE_PADDING(mtu + ETH_HLEN + 2*VLAN_HLEN + 2 +\
3108				  ETH_CACHE_LINE_SIZE)
3109
3110static int
3111ecore_iwarp_ll2_start(struct ecore_hwfn *p_hwfn,
3112		      struct ecore_rdma_start_in_params *params)
3113{
3114	struct ecore_iwarp_info *iwarp_info;
3115	struct ecore_ll2_acquire_data data;
3116	struct ecore_ll2_cbs cbs;
3117	u32 mpa_buff_size;
3118	int rc = ECORE_SUCCESS;
3119	u16 n_ooo_bufs;
3120	int i;
3121
3122	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
3123	iwarp_info->ll2_syn_handle = ECORE_IWARP_HANDLE_INVAL;
3124	iwarp_info->ll2_ooo_handle = ECORE_IWARP_HANDLE_INVAL;
3125	iwarp_info->ll2_mpa_handle = ECORE_IWARP_HANDLE_INVAL;
3126
3127	iwarp_info->max_mtu = params->max_mtu;
3128
3129	OSAL_MEMCPY(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr,
3130		    ETH_ALEN);
3131
3132	rc = ecore_llh_add_mac_filter(p_hwfn->p_dev, 0, params->mac_addr);
3133	if (rc != ECORE_SUCCESS)
3134		return rc;
3135
3136	/* Start SYN connection */
3137	cbs.rx_comp_cb = ecore_iwarp_ll2_comp_syn_pkt;
3138	cbs.rx_release_cb = ecore_iwarp_ll2_rel_rx_pkt;
3139	cbs.tx_comp_cb = ecore_iwarp_ll2_comp_tx_pkt;
3140	cbs.tx_release_cb = ecore_iwarp_ll2_rel_tx_pkt;
3141	cbs.cookie = p_hwfn;
3142
3143	OSAL_MEMSET(&data, 0, sizeof(data));
3144	data.input.conn_type = ECORE_LL2_TYPE_IWARP;
3145	data.input.mtu = ECORE_IWARP_MAX_SYN_PKT_SIZE;
3146	data.input.rx_num_desc = ECORE_IWARP_LL2_SYN_RX_SIZE;
3147	data.input.tx_num_desc = ECORE_IWARP_LL2_SYN_TX_SIZE;
3148	data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
3149	data.input.tx_tc = PKT_LB_TC;
3150	data.input.tx_dest = ECORE_LL2_TX_DEST_LB;
3151	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
3152	data.cbs = &cbs;
3153
3154	rc = ecore_ll2_acquire_connection(p_hwfn, &data);
3155	if (rc) {
3156		DP_NOTICE(p_hwfn, false, "Failed to acquire LL2 connection\n");
3157		ecore_llh_remove_mac_filter(p_hwfn->p_dev, 0, params->mac_addr);
3158		return rc;
3159	}
3160
3161	rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
3162	if (rc) {
3163		DP_NOTICE(p_hwfn, false,
3164			  "Failed to establish LL2 connection\n");
3165		goto err;
3166	}
3167
3168	rc = ecore_iwarp_ll2_alloc_buffers(p_hwfn,
3169					   ECORE_IWARP_LL2_SYN_RX_SIZE,
3170					   ECORE_IWARP_MAX_SYN_PKT_SIZE,
3171					   iwarp_info->ll2_syn_handle);
3172	if (rc)
3173		goto err;
3174
3175	/* Start OOO connection */
3176	data.input.conn_type = ECORE_LL2_TYPE_OOO;
3177	data.input.mtu = params->max_mtu;
3178
3179	n_ooo_bufs = params->iwarp.ooo_num_rx_bufs;
3180
3181	if (n_ooo_bufs > ECORE_IWARP_LL2_OOO_MAX_RX_SIZE)
3182		n_ooo_bufs = ECORE_IWARP_LL2_OOO_MAX_RX_SIZE;
3183
3184	data.input.rx_num_desc = n_ooo_bufs;
3185	data.input.rx_num_ooo_buffers = n_ooo_bufs;
3186
3187	p_hwfn->p_rdma_info->iwarp.num_ooo_rx_bufs = data.input.rx_num_desc;
3188	data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
3189	data.input.tx_num_desc = ECORE_IWARP_LL2_OOO_DEF_TX_SIZE;
3190	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
3191	data.input.secondary_queue = true;
3192
3193	rc = ecore_ll2_acquire_connection(p_hwfn, &data);
3194	if (rc)
3195		goto err;
3196
3197	rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
3198	if (rc)
3199		goto err;
3200
3201	/* Start MPA connection */
3202	cbs.rx_comp_cb = ecore_iwarp_ll2_comp_mpa_pkt;
3203	cbs.slowpath_cb = ecore_iwarp_ll2_slowpath;
3204
3205	OSAL_MEMSET(&data, 0, sizeof(data));
3206	data.input.conn_type = ECORE_LL2_TYPE_IWARP;
3207	data.input.mtu = params->max_mtu;
3208	data.input.rx_num_desc = n_ooo_bufs * 2;
3209	/* we allocate the same amount for TX to reduce the chance we
3210	 * run out of tx descriptors
3211	 */
3212	data.input.tx_num_desc = data.input.rx_num_desc;
3213	data.input.tx_max_bds_per_packet = ECORE_IWARP_MAX_BDS_PER_FPDU;
3214	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
3215	data.input.secondary_queue = true;
3216	data.cbs = &cbs;
3217
3218	rc = ecore_ll2_acquire_connection(p_hwfn, &data);
3219	if (rc)
3220		goto err;
3221
3222	rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
3223	if (rc)
3224		goto err;
3225
3226	mpa_buff_size = ECORE_IWARP_MAX_BUF_SIZE(params->max_mtu);
3227	rc = ecore_iwarp_ll2_alloc_buffers(p_hwfn,
3228					   data.input.rx_num_desc,
3229					   mpa_buff_size,
3230					   iwarp_info->ll2_mpa_handle);
3231	if (rc)
3232		goto err;
3233
3234	iwarp_info->partial_fpdus =
3235		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
3236			    sizeof(*iwarp_info->partial_fpdus) *
3237			    (u16)p_hwfn->p_rdma_info->num_qps);
3238
3239	if (!iwarp_info->partial_fpdus) {
3240		DP_NOTICE(p_hwfn, false,
3241			  "Failed to allocate ecore_iwarp_info(partial_fpdus)\n");
3242		goto err;
3243	}
3244
3245	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
3246
3247	/* The mpa_bufs array serves for pending RX packets received on the
3248	 * mpa ll2 that don't have place on the tx ring and require later
3249	 * processing. We can't fail on allocation of such a struct therefore
3250	 * we allocate enough to take care of all rx packets
3251	 */
3252	iwarp_info->mpa_bufs =
3253		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
3254			    sizeof(*iwarp_info->mpa_bufs) *
3255				   data.input.rx_num_desc);
3256
3257	if (!iwarp_info->mpa_bufs) {
3258		DP_NOTICE(p_hwfn, false,
3259			  "Failed to allocate mpa_bufs array mem_size=%d\n",
3260			  (u32)(sizeof(*iwarp_info->mpa_bufs) *
3261				data.input.rx_num_desc));
3262		goto err;
3263	}
3264
3265	iwarp_info->mpa_intermediate_buf =
3266		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, mpa_buff_size);
3267	if (!iwarp_info->mpa_intermediate_buf) {
3268		DP_NOTICE(p_hwfn, false,
3269			  "Failed to allocate mpa_intermediate_buf mem_size=%d\n",
3270			  mpa_buff_size);
3271		goto err;
3272	}
3273
3274	OSAL_LIST_INIT(&iwarp_info->mpa_buf_pending_list);
3275	OSAL_LIST_INIT(&iwarp_info->mpa_buf_list);
3276	for (i = 0; i < data.input.rx_num_desc; i++) {
3277		OSAL_LIST_PUSH_TAIL(&iwarp_info->mpa_bufs[i].list_entry,
3278				    &iwarp_info->mpa_buf_list);
3279	}
3280
3281	return rc;
3282
3283err:
3284	ecore_iwarp_ll2_stop(p_hwfn);
3285
3286	return rc;
3287}
3288
3289static void
3290ecore_iwarp_set_defaults(struct ecore_hwfn *p_hwfn,
3291			 struct ecore_rdma_start_in_params *params)
3292{
3293	u32 rcv_wnd_size;
3294	u32 n_ooo_bufs;
3295
3296	/* rcv_wnd_size = 0: use defaults */
3297	rcv_wnd_size = params->iwarp.rcv_wnd_size;
3298	if (!rcv_wnd_size) {
3299		if (ecore_device_num_ports(p_hwfn->p_dev) == 4) {
3300			rcv_wnd_size = ECORE_IS_AH(p_hwfn->p_dev) ?
3301				       ECORE_IWARP_RCV_WND_SIZE_AH_DEF_4_PORTS :
3302				       ECORE_IWARP_RCV_WND_SIZE_BB_DEF_4_PORTS;
3303		} else {
3304			rcv_wnd_size = ECORE_IS_AH(p_hwfn->p_dev) ?
3305				       ECORE_IWARP_RCV_WND_SIZE_AH_DEF_2_PORTS :
3306				       ECORE_IWARP_RCV_WND_SIZE_BB_DEF_2_PORTS;
3307		}
3308		params->iwarp.rcv_wnd_size = rcv_wnd_size;
3309	}
3310
3311	n_ooo_bufs = params->iwarp.ooo_num_rx_bufs;
3312	if (!n_ooo_bufs) {
3313		n_ooo_bufs = (u32)(((u64)ECORE_MAX_OOO *
3314			      params->iwarp.rcv_wnd_size) /
3315			      params->max_mtu);
3316		n_ooo_bufs = OSAL_MIN_T(u32, n_ooo_bufs, USHRT_MAX);
3317		params->iwarp.ooo_num_rx_bufs = (u16)n_ooo_bufs;
3318	}
3319}
3320
3321enum _ecore_status_t
3322ecore_iwarp_setup(struct ecore_hwfn		    *p_hwfn,
3323		  struct ecore_rdma_start_in_params *params)
3324{
3325	enum _ecore_status_t rc = ECORE_SUCCESS;
3326	struct ecore_iwarp_info *iwarp_info;
3327	u32 rcv_wnd_size;
3328
3329	iwarp_info = &(p_hwfn->p_rdma_info->iwarp);
3330
3331	if (!params->iwarp.rcv_wnd_size || !params->iwarp.ooo_num_rx_bufs)
3332		ecore_iwarp_set_defaults(p_hwfn, params);
3333
3334	/* Scale 0 will set window of 0xFFFC (64K -4).
3335	 * Scale x will set window of 0xFFFC << (x)
3336	 * Therefore we subtract log2(64K) so that result is 0
3337	 */
3338	rcv_wnd_size = params->iwarp.rcv_wnd_size;
3339	if (rcv_wnd_size < ECORE_IWARP_RCV_WND_SIZE_MIN)
3340		rcv_wnd_size = ECORE_IWARP_RCV_WND_SIZE_MIN;
3341
3342	iwarp_info->rcv_wnd_scale = OSAL_MIN_T(u32, OSAL_LOG2(rcv_wnd_size) -
3343		OSAL_LOG2(ECORE_IWARP_RCV_WND_SIZE_MIN), ECORE_IWARP_MAX_WND_SCALE);
3344	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
3345
3346	iwarp_info->tcp_flags = params->iwarp.flags;
3347	iwarp_info->crc_needed = params->iwarp.crc_needed;
3348	switch (params->iwarp.mpa_rev) {
3349	case ECORE_MPA_REV1:
3350		iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
3351		break;
3352	case ECORE_MPA_REV2:
3353		iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
3354		break;
3355	}
3356
3357	iwarp_info->peer2peer = params->iwarp.mpa_peer2peer;
3358	iwarp_info->rtr_type = MPA_RTR_TYPE_NONE;
3359
3360	if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_SEND)
3361		iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
3362
3363	if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_WRITE)
3364		iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
3365
3366	if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_READ)
3367		iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
3368
3369	//DAVIDS OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.qp_lock);
3370	OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.ep_list);
3371	OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.listen_list);
3372
3373	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
3374				    ecore_iwarp_async_event);
3375	ecore_ooo_setup(p_hwfn);
3376
3377	rc = ecore_iwarp_ll2_start(p_hwfn, params);
3378
3379	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3380		   "MPA_REV = %d. peer2peer=%d rtr=%x\n",
3381		   iwarp_info->mpa_rev,
3382		   iwarp_info->peer2peer,
3383		   iwarp_info->rtr_type);
3384
3385	return rc;
3386}
3387
3388enum _ecore_status_t
3389ecore_iwarp_stop(struct ecore_hwfn *p_hwfn)
3390{
3391	enum _ecore_status_t rc;
3392
3393	ecore_iwarp_free_prealloc_ep(p_hwfn);
3394	rc = ecore_iwarp_wait_for_all_cids(p_hwfn);
3395	if (rc != ECORE_SUCCESS)
3396		return rc;
3397
3398	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
3399
3400	return ecore_iwarp_ll2_stop(p_hwfn);
3401}
3402
3403static void
3404ecore_iwarp_qp_in_error(struct ecore_hwfn *p_hwfn,
3405			struct ecore_iwarp_ep *ep,
3406			u8 fw_return_code)
3407{
3408	struct ecore_iwarp_cm_event_params params;
3409
3410	ecore_iwarp_modify_qp(p_hwfn, ep->qp, ECORE_IWARP_QP_STATE_ERROR, true);
3411
3412	params.event = ECORE_IWARP_EVENT_CLOSE;
3413	params.ep_context = ep;
3414	params.cm_info = &ep->cm_info;
3415	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
3416		ECORE_SUCCESS : ECORE_CONN_RESET;
3417
3418	ep->state = ECORE_IWARP_EP_CLOSED;
3419	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3420	OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
3421			       &p_hwfn->p_rdma_info->iwarp.ep_list);
3422	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3423
3424	ep->event_cb(ep->cb_context, &params);
3425}
3426
3427static void
3428ecore_iwarp_exception_received(struct ecore_hwfn *p_hwfn,
3429			       struct ecore_iwarp_ep *ep,
3430			       int fw_ret_code)
3431{
3432	struct ecore_iwarp_cm_event_params params;
3433	bool event_cb = false;
3434
3435	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
3436		   ep->cid, fw_ret_code);
3437
3438	switch (fw_ret_code) {
3439	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
3440		params.status = ECORE_SUCCESS;
3441		params.event = ECORE_IWARP_EVENT_DISCONNECT;
3442		event_cb = true;
3443		break;
3444	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
3445		params.status = ECORE_CONN_RESET;
3446		params.event = ECORE_IWARP_EVENT_DISCONNECT;
3447		event_cb = true;
3448		break;
3449	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
3450		params.event = ECORE_IWARP_EVENT_RQ_EMPTY;
3451		event_cb = true;
3452		break;
3453	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
3454		params.event = ECORE_IWARP_EVENT_IRQ_FULL;
3455		event_cb = true;
3456		break;
3457	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
3458		params.event = ECORE_IWARP_EVENT_LLP_TIMEOUT;
3459		event_cb = true;
3460		break;
3461	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
3462		params.event = ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
3463		event_cb = true;
3464		break;
3465	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
3466		params.event = ECORE_IWARP_EVENT_CQ_OVERFLOW;
3467		event_cb = true;
3468		break;
3469	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
3470		params.event = ECORE_IWARP_EVENT_QP_CATASTROPHIC;
3471		event_cb = true;
3472		break;
3473	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
3474		params.event = ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR;
3475		event_cb = true;
3476		break;
3477	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
3478		params.event = ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR;
3479		event_cb = true;
3480		break;
3481	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
3482		params.event = ECORE_IWARP_EVENT_TERMINATE_RECEIVED;
3483		event_cb = true;
3484		break;
3485	default:
3486		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3487			   "Unhandled exception received...\n");
3488		break;
3489	}
3490
3491	if (event_cb) {
3492		params.ep_context = ep;
3493		params.cm_info = &ep->cm_info;
3494		ep->event_cb(ep->cb_context, &params);
3495	}
3496}
3497
3498static void
3499ecore_iwarp_tcp_connect_unsuccessful(struct ecore_hwfn *p_hwfn,
3500				     struct ecore_iwarp_ep *ep,
3501				     u8 fw_return_code)
3502{
3503	struct ecore_iwarp_cm_event_params params;
3504
3505	OSAL_MEM_ZERO(&params, sizeof(params));
3506	params.event = ECORE_IWARP_EVENT_ACTIVE_COMPLETE;
3507	params.ep_context = ep;
3508	params.cm_info = &ep->cm_info;
3509	ep->state = ECORE_IWARP_EP_CLOSED;
3510
3511	switch (fw_return_code) {
3512	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
3513		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3514			   "%s(0x%x) TCP connect got invalid packet\n",
3515			   ECORE_IWARP_CONNECT_MODE_STRING(ep),
3516			   ep->tcp_cid);
3517		params.status = ECORE_CONN_RESET;
3518		break;
3519	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
3520		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3521			   "%s(0x%x) TCP Connection Reset\n",
3522			   ECORE_IWARP_CONNECT_MODE_STRING(ep),
3523			   ep->tcp_cid);
3524		params.status = ECORE_CONN_RESET;
3525		break;
3526	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
3527		DP_NOTICE(p_hwfn, false, "%s(0x%x) TCP timeout\n",
3528			  ECORE_IWARP_CONNECT_MODE_STRING(ep),
3529			  ep->tcp_cid);
3530		params.status = ECORE_TIMEOUT;
3531		break;
3532	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
3533		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA not supported VER\n",
3534			  ECORE_IWARP_CONNECT_MODE_STRING(ep),
3535			  ep->tcp_cid);
3536		params.status = ECORE_CONN_REFUSED;
3537		break;
3538	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
3539		DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Invalid Packet\n",
3540			  ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
3541		params.status = ECORE_CONN_RESET;
3542		break;
3543	default:
3544		DP_ERR(p_hwfn, "%s(0x%x) Unexpected return code tcp connect: %d\n",
3545		       ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid,
3546		       fw_return_code);
3547		params.status = ECORE_CONN_RESET;
3548		break;
3549	}
3550
3551	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3552		ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
3553		ecore_iwarp_return_ep(p_hwfn, ep);
3554	} else {
3555		ep->event_cb(ep->cb_context, &params);
3556		OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3557		OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
3558				       &p_hwfn->p_rdma_info->iwarp.ep_list);
3559		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3560	}
3561}
3562
3563static void
3564ecore_iwarp_connect_complete(struct ecore_hwfn *p_hwfn,
3565			     struct ecore_iwarp_ep *ep,
3566			     u8 fw_return_code)
3567{
3568	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3569		/* Done with the SYN packet, post back to ll2 rx */
3570		ecore_iwarp_ll2_post_rx(
3571			p_hwfn, ep->syn,
3572			p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
3573
3574		ep->syn = OSAL_NULL;
3575
3576		if (ep->state == ECORE_IWARP_EP_ABORTING)
3577			return;
3578
3579		/* If connect failed - upper layer doesn't know about it */
3580		if (fw_return_code == RDMA_RETURN_OK)
3581			ecore_iwarp_mpa_received(p_hwfn, ep);
3582		else
3583			ecore_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3584							     fw_return_code);
3585
3586	} else {
3587		if (fw_return_code == RDMA_RETURN_OK)
3588			ecore_iwarp_mpa_offload(p_hwfn, ep);
3589		else
3590			ecore_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3591							     fw_return_code);
3592	}
3593}
3594
3595static OSAL_INLINE bool
3596ecore_iwarp_check_ep_ok(struct ecore_hwfn *p_hwfn,
3597			struct ecore_iwarp_ep *ep)
3598{
3599	if (ep == OSAL_NULL) {
3600		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3601		return false;
3602	}
3603
3604	if (ep->sig != 0xdeadbeef) {
3605		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3606		return false;
3607	}
3608
3609	return true;
3610}
3611
3612static enum _ecore_status_t
3613ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn,
3614			u8 fw_event_code,
3615			u16 OSAL_UNUSED echo,
3616			union event_ring_data *data,
3617			u8 fw_return_code)
3618{
3619	struct regpair *fw_handle = &data->rdma_data.async_handle;
3620	struct ecore_iwarp_ep *ep = OSAL_NULL;
3621	u16 cid;
3622
3623	ep = (struct ecore_iwarp_ep *)(osal_uintptr_t)HILO_64(fw_handle->hi,
3624							      fw_handle->lo);
3625
3626	switch (fw_event_code) {
3627	/* Async completion after TCP 3-way handshake */
3628	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3629		if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3630			return ECORE_INVAL;
3631		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3632			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3633			   ep->tcp_cid, fw_return_code);
3634		ecore_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3635		break;
3636	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3637		if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3638			return ECORE_INVAL;
3639		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3640			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3641			   ep->cid, fw_return_code);
3642		ecore_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3643		break;
3644	/* Async completion for Close Connection ramrod */
3645	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3646		if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3647			return ECORE_INVAL;
3648		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3649			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3650			   ep->cid, fw_return_code);
3651		ecore_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3652		break;
3653	/* Async event for active side only */
3654	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3655		if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3656			return ECORE_INVAL;
3657		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3658			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3659			   ep->cid, fw_return_code);
3660		ecore_iwarp_mpa_reply_arrived(p_hwfn, ep);
3661		break;
3662	/* MPA Negotiations completed */
3663	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3664		if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3665			return ECORE_INVAL;
3666		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3667			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3668			   ep->cid, fw_return_code);
3669		ecore_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3670		break;
3671	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3672		cid = (u16)OSAL_LE32_TO_CPU(fw_handle->lo);
3673		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3674			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n",
3675			   cid);
3676		ecore_iwarp_cid_cleaned(p_hwfn, cid);
3677
3678		break;
3679	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3680		DP_NOTICE(p_hwfn, false,
3681			  "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3682
3683		p_hwfn->p_rdma_info->events.affiliated_event(
3684			p_hwfn->p_rdma_info->events.context,
3685			ECORE_IWARP_EVENT_CQ_OVERFLOW,
3686			(void *)fw_handle);
3687		break;
3688	default:
3689		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3690		       fw_event_code);
3691		return ECORE_INVAL;
3692	}
3693	return ECORE_SUCCESS;
3694}
3695
3696enum _ecore_status_t
3697ecore_iwarp_create_listen(void *rdma_cxt,
3698			  struct ecore_iwarp_listen_in *iparams,
3699			  struct ecore_iwarp_listen_out *oparams)
3700{
3701	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3702	struct ecore_iwarp_listener *listener;
3703
3704	listener = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*listener));
3705
3706	if (!listener) {
3707		DP_NOTICE(p_hwfn,
3708			  false,
3709			  "ecore iwarp create listener failed: cannot allocate memory (listener). rc = %d\n",
3710			  ECORE_NOMEM);
3711		return ECORE_NOMEM;
3712	}
3713	listener->ip_version = iparams->ip_version;
3714	OSAL_MEMCPY(listener->ip_addr,
3715		    iparams->ip_addr,
3716		    sizeof(listener->ip_addr));
3717	listener->port = iparams->port;
3718	listener->vlan = iparams->vlan;
3719
3720	listener->event_cb = iparams->event_cb;
3721	listener->cb_context = iparams->cb_context;
3722	listener->max_backlog = iparams->max_backlog;
3723	listener->state = ECORE_IWARP_LISTENER_STATE_ACTIVE;
3724	oparams->handle = listener;
3725
3726	OSAL_SPIN_LOCK_INIT(&listener->lock);
3727	OSAL_LIST_INIT(&listener->ep_list);
3728	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3729	OSAL_LIST_PUSH_TAIL(&listener->list_entry,
3730			    &p_hwfn->p_rdma_info->iwarp.listen_list);
3731	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3732
3733	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3734		   listener->event_cb,
3735		   listener,
3736		   listener->ip_addr[0],
3737		   listener->ip_addr[1],
3738		   listener->ip_addr[2],
3739		   listener->ip_addr[3],
3740		   listener->port,
3741		   listener->vlan);
3742
3743	return ECORE_SUCCESS;
3744}
3745
3746static void
3747ecore_iwarp_pause_complete(struct ecore_iwarp_listener *listener)
3748{
3749	struct ecore_iwarp_cm_event_params params;
3750
3751	if (listener->state == ECORE_IWARP_LISTENER_STATE_UNPAUSE)
3752		listener->state = ECORE_IWARP_LISTENER_STATE_ACTIVE;
3753
3754	params.event = ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP;
3755	listener->event_cb(listener->cb_context, &params);
3756}
3757
3758static void
3759ecore_iwarp_tcp_abort_comp(struct ecore_hwfn *p_hwfn, void *cookie,
3760			   union event_ring_data OSAL_UNUSED *data,
3761			   u8 OSAL_UNUSED fw_return_code)
3762{
3763	struct ecore_iwarp_ep *ep = (struct ecore_iwarp_ep *)cookie;
3764	struct ecore_iwarp_listener *listener = ep->listener;
3765
3766	ecore_iwarp_return_ep(p_hwfn, ep);
3767
3768	if (OSAL_LIST_IS_EMPTY(&listener->ep_list))
3769		listener->done = true;
3770}
3771
3772static void
3773ecore_iwarp_abort_inflight_connections(struct ecore_hwfn *p_hwfn,
3774				       struct ecore_iwarp_listener *listener)
3775{
3776	struct ecore_spq_entry *p_ent = OSAL_NULL;
3777	struct ecore_iwarp_ep *ep = OSAL_NULL;
3778	struct ecore_sp_init_data init_data;
3779	struct ecore_spq_comp_cb comp_data;
3780	enum _ecore_status_t rc;
3781
3782	/* remove listener from list before destroying listener */
3783	OSAL_LIST_REMOVE_ENTRY(&listener->list_entry,
3784			       &p_hwfn->p_rdma_info->iwarp.listen_list);
3785	if (OSAL_LIST_IS_EMPTY(&listener->ep_list)) {
3786		listener->done = true;
3787		return;
3788	}
3789	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
3790	init_data.p_comp_data = &comp_data;
3791	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3792	init_data.comp_mode = ECORE_SPQ_MODE_CB;
3793	init_data.p_comp_data->function = ecore_iwarp_tcp_abort_comp;
3794
3795	OSAL_LIST_FOR_EACH_ENTRY(ep, &listener->ep_list,
3796				 list_entry, struct ecore_iwarp_ep) {
3797		ep->state = ECORE_IWARP_EP_ABORTING;
3798		init_data.p_comp_data->cookie = ep;
3799		init_data.cid = ep->tcp_cid;
3800		rc = ecore_sp_init_request(p_hwfn, &p_ent,
3801					   IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD,
3802					   PROTOCOLID_IWARP,
3803					   &init_data);
3804		if (rc == ECORE_SUCCESS)
3805			ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
3806	}
3807}
3808
3809static void
3810ecore_iwarp_listener_state_transition(struct ecore_hwfn *p_hwfn, void *cookie,
3811				      union event_ring_data OSAL_UNUSED *data,
3812				      u8 OSAL_UNUSED fw_return_code)
3813{
3814	struct ecore_iwarp_listener *listener = (struct ecore_iwarp_listener *)cookie;
3815
3816	switch (listener->state) {
3817	case ECORE_IWARP_LISTENER_STATE_PAUSE:
3818	case ECORE_IWARP_LISTENER_STATE_UNPAUSE:
3819		ecore_iwarp_pause_complete(listener);
3820		break;
3821	case ECORE_IWARP_LISTENER_STATE_DESTROYING:
3822		ecore_iwarp_abort_inflight_connections(p_hwfn, listener);
3823		break;
3824	default:
3825		break;
3826	}
3827}
3828
3829static enum _ecore_status_t
3830ecore_iwarp_empty_ramrod(struct ecore_hwfn *p_hwfn,
3831			 struct ecore_iwarp_listener *listener)
3832{
3833	struct ecore_spq_entry *p_ent = OSAL_NULL;
3834	struct ecore_spq_comp_cb comp_data;
3835	struct ecore_sp_init_data init_data;
3836	enum _ecore_status_t rc;
3837
3838	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
3839	init_data.p_comp_data = &comp_data;
3840	init_data.cid = ecore_spq_get_cid(p_hwfn);
3841	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3842	init_data.comp_mode = ECORE_SPQ_MODE_CB;
3843	init_data.p_comp_data->function = ecore_iwarp_listener_state_transition;
3844	init_data.p_comp_data->cookie = listener;
3845	rc = ecore_sp_init_request(p_hwfn, &p_ent,
3846				   COMMON_RAMROD_EMPTY,
3847				   PROTOCOLID_COMMON,
3848				   &init_data);
3849	if (rc != ECORE_SUCCESS)
3850		return rc;
3851
3852	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
3853	if (rc != ECORE_SUCCESS)
3854		return rc;
3855
3856	return rc;
3857}
3858
3859enum _ecore_status_t
3860ecore_iwarp_pause_listen(void *rdma_cxt, void *handle,
3861			 bool pause, bool comp)
3862{
3863	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3864	struct ecore_iwarp_listener *listener =
3865		(struct ecore_iwarp_listener *)handle;
3866	enum _ecore_status_t rc;
3867
3868	listener->state = pause ?
3869		ECORE_IWARP_LISTENER_STATE_PAUSE :
3870		ECORE_IWARP_LISTENER_STATE_UNPAUSE;
3871	if (!comp)
3872		return ECORE_SUCCESS;
3873
3874	rc = ecore_iwarp_empty_ramrod(p_hwfn, listener);
3875	if (rc != ECORE_SUCCESS)
3876		return rc;
3877
3878	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener=%p, state=%d\n",
3879		   listener, listener->state);
3880
3881	return ECORE_PENDING;
3882}
3883
3884enum _ecore_status_t
3885ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3886{
3887	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3888	struct ecore_iwarp_listener *listener =
3889		(struct ecore_iwarp_listener *)handle;
3890	enum _ecore_status_t rc;
3891	int wait_count = 0;
3892
3893	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "handle=%p\n", handle);
3894
3895	listener->state = ECORE_IWARP_LISTENER_STATE_DESTROYING;
3896	rc = ecore_iwarp_empty_ramrod(p_hwfn, listener);
3897	if (rc != ECORE_SUCCESS)
3898		return rc;
3899
3900	while (!listener->done) {
3901		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3902			   "Waiting for ep list to be empty...\n");
3903		OSAL_MSLEEP(100);
3904		if (wait_count++ > 200) {
3905			DP_NOTICE(p_hwfn, false, "ep list close timeout\n");
3906			break;
3907		}
3908	}
3909
3910	OSAL_FREE(p_hwfn->p_dev, listener);
3911
3912	return ECORE_SUCCESS;
3913}
3914
3915enum _ecore_status_t
3916ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams)
3917{
3918	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3919	struct ecore_sp_init_data init_data;
3920	struct ecore_spq_entry *p_ent;
3921	struct ecore_rdma_qp *qp;
3922	struct ecore_iwarp_ep *ep;
3923	enum _ecore_status_t rc;
3924
3925	ep = (struct ecore_iwarp_ep *)iparams->ep_context;
3926	if (!ep) {
3927		DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
3928		return ECORE_INVAL;
3929	}
3930
3931	qp = ep->qp;
3932
3933	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3934		   qp->icid, ep->tcp_cid);
3935
3936	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
3937	init_data.cid = qp->icid;
3938	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3939	init_data.comp_mode = ECORE_SPQ_MODE_CB;
3940
3941	rc = ecore_sp_init_request(p_hwfn, &p_ent,
3942				   IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3943				   PROTOCOLID_IWARP, &init_data);
3944
3945	if (rc != ECORE_SUCCESS)
3946		return rc;
3947
3948	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
3949
3950	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ecore_iwarp_send_rtr, rc = 0x%x\n",
3951		   rc);
3952
3953	return rc;
3954}
3955
3956enum _ecore_status_t
3957ecore_iwarp_query_qp(struct ecore_rdma_qp *qp,
3958		     struct ecore_rdma_query_qp_out_params *out_params)
3959{
3960	out_params->state = ecore_iwarp2roce_state(qp->iwarp_state);
3961	return ECORE_SUCCESS;
3962}
3963
3964#ifdef _NTDDK_
3965#pragma warning(pop)
3966#endif
3967