/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnxr_def.h"
#include "rdma_common.h"
#include "qlnxr_cm.h"

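/*
 * qlnxr_inc_sw_gsi_cons() advances the software-only consumer index of
 * a GSI work queue.  The modulo wraps the index at max_wr, so with
 * max_wr = 4 it steps 0 -> 1 -> 2 -> 3 -> 0 and never runs past the
 * last rqe_wr_id[]/wqe_wr_id[] slot.  An equivalent unrolled form
 * (a sketch only, not what the driver compiles) would be:
 *
 *	info->gsi_cons++;
 *	if (info->gsi_cons == info->max_wr)
 *		info->gsi_cons = 0;
 */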
void
qlnxr_inc_sw_gsi_cons(struct qlnxr_qp_hwq_info *info)
{
	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

void
qlnxr_store_gsi_qp_cq(struct qlnxr_dev *dev,
		struct qlnxr_qp *qp,
		struct ib_qp_init_attr *attrs)
{
	QL_DPRINT12(dev->ha, "enter\n");

	dev->gsi_qp_created = 1;
	dev->gsi_sqcq = get_qlnxr_cq((attrs->send_cq));
	dev->gsi_rqcq = get_qlnxr_cq((attrs->recv_cq));
	dev->gsi_qp = qp;

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

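/*
 * TX completion callback for the GSI LL2 connection.  Only the UD
 * header buffer is freed here: it was DMA-allocated by the driver in
 * qlnxr_gsi_build_packet(), while the payload fragments still belong
 * to the ULP and are reported back through qlnxr_gsi_poll_cq().
 */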
void
qlnxr_ll2_complete_tx_packet(void *cxt,
		uint8_t connection_handle,
		void *cookie,
		dma_addr_t first_frag_addr,
		bool b_last_fragment,
		bool b_last_packet)
{
	struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
	struct ecore_roce_ll2_packet *pkt = cookie;
	struct qlnxr_cq *cq = dev->gsi_sqcq;
	struct qlnxr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	QL_DPRINT12(dev->ha, "enter\n");

	qlnx_dma_free_coherent(&dev->ha->cdev, pkt->header.vaddr,
			pkt->header.baddr, pkt->header.len);
	kfree(pkt);

	spin_lock_irqsave(&qp->q_lock, flags);

	qlnxr_inc_sw_gsi_cons(&qp->sq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

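/*
 * RX completion callback for the GSI LL2 connection.  The received
 * metadata is staged in qp->rqe_wr_id[] at the software producer slot
 * (rq.gsi_cons): opaque_data_0/1 carry the source MAC in network
 * order, and data_length excludes the GRH.  qlnxr_gsi_poll_cq() later
 * turns these entries into work completions.
 */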
void
qlnxr_ll2_complete_rx_packet(void *cxt,
		struct ecore_ll2_comp_rx_data *data)
{
	struct qlnxr_dev *dev = (struct qlnxr_dev *)cxt;
	struct qlnxr_cq *cq = dev->gsi_rqcq;
	struct qlnxr_qp *qp = NULL;
	unsigned long flags;
	uint32_t qp_num = 0;

	QL_DPRINT12(dev->ha, "enter\n");

	if (data->u.data_length_error) {
		/* TODO: add statistic */
	}

	if (data->cookie == NULL) {
		QL_DPRINT12(dev->ha, "cookie is NULL, bad sign\n");
	}

	qp_num = (0xFF << 16) | data->qp_id;

	if (data->qp_id == 1) {
		qp = dev->gsi_qp;
	} else {
		/* TODO: This will be needed for UD QP support */
		/* For RoCEv1 this is invalid */
		QL_DPRINT12(dev->ha, "invalid QP\n");
		return;
	}
	/* note: currently only one recv sg is supported */
	QL_DPRINT12(dev->ha, "MAD received, rx_buf_addr: %x\n",
			data->rx_buf_addr);

	spin_lock_irqsave(&qp->q_lock, flags);

	qp->rqe_wr_id[qp->rq.gsi_cons].rc =
		data->u.data_length_error ? -EINVAL : 0;
	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
	/* note: length stands for data length i.e. GRH is excluded */
	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
		data->length.data_length;
	*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
		ntohl(data->opaque_data_0);
	*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
		ntohs((u16)data->opaque_data_1);

	qlnxr_inc_sw_gsi_cons(&qp->rq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

void
qlnxr_ll2_release_rx_packet(void *cxt,
		u8 connection_handle,
		void *cookie,
		dma_addr_t rx_buf_addr,
		bool b_last_packet)
{
	/* Do nothing... */
}

static void
qlnxr_destroy_gsi_cq(struct qlnxr_dev *dev,
		struct ib_qp_init_attr *attrs)
{
	struct ecore_rdma_destroy_cq_in_params iparams;
	struct ecore_rdma_destroy_cq_out_params oparams;
	struct qlnxr_cq *cq;

	QL_DPRINT12(dev->ha, "enter\n");

	cq = get_qlnxr_cq((attrs->send_cq));
	iparams.icid = cq->icid;
	ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	ecore_chain_free(&dev->ha->cdev, &cq->pbl);

	cq = get_qlnxr_cq((attrs->recv_cq));
	/* if a dedicated recv_cq was used, delete it too */
	if (iparams.icid != cq->icid) {
		iparams.icid = cq->icid;
		ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		ecore_chain_free(&dev->ha->cdev, &cq->pbl);
	}

	QL_DPRINT12(dev->ha, "exit\n");

	return;
}

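/*
 * Validate the caller-supplied QP caps against the GSI limits.  The
 * checks matter because qlnxr_create_gsi_qp() sizes the software
 * shadow rings (rqe_wr_id[]/wqe_wr_id[]) directly from
 * attrs->cap.max_recv_wr and max_send_wr, and the RX path only ever
 * fills sg_list[0] (see the "one recv sg" note above).
 */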
static inline int
qlnxr_check_gsi_qp_attrs(struct qlnxr_dev *dev,
		struct ib_qp_init_attr *attrs)
{
	QL_DPRINT12(dev->ha, "enter\n");

	if (attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE) {
		QL_DPRINT11(dev->ha,
			"(attrs->cap.max_recv_sge > QLNXR_GSI_MAX_RECV_SGE)\n");
		return -EINVAL;
	}

	if (attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR) {
		QL_DPRINT11(dev->ha,
			"(attrs->cap.max_recv_wr > QLNXR_GSI_MAX_RECV_WR)\n");
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR) {
		QL_DPRINT11(dev->ha,
			"(attrs->cap.max_send_wr > QLNXR_GSI_MAX_SEND_WR)\n");
		return -EINVAL;
	}

	QL_DPRINT12(dev->ha, "exit\n");

	return 0;
}

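/*
 * Post one GSI packet to the LL2 TX queue: the UD header becomes the
 * first BD (fragment) and each payload SGE is chained with
 * ecore_ll2_set_fragment_of_tx_packet(), so num_of_bds is 1 + n_seg.
 * If posting the header fails nothing is in flight and the packet can
 * be freed; once the header BD is in, a fragment failure leaves a
 * partial packet that only a completion can release.
 */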
static int
qlnxr_ll2_post_tx(struct qlnxr_dev *dev, struct ecore_roce_ll2_packet *pkt)
{
	enum ecore_ll2_roce_flavor_type roce_flavor;
	struct ecore_ll2_tx_pkt_info ll2_tx_pkt;
	struct ecore_ll2_stats stats = {0};
	int rc;
	int i;

	QL_DPRINT12(dev->ha, "enter\n");

	memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

	if (pkt->roce_mode != ROCE_V1) {
		QL_DPRINT11(dev->ha, "roce_mode != ROCE_V1\n");
		return (-1);
	}

	roce_flavor = (pkt->roce_mode == ROCE_V1) ?
		ECORE_LL2_ROCE : ECORE_LL2_RROCE;

	ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
	ll2_tx_pkt.vlan = 0; /* ??? */
	ll2_tx_pkt.tx_dest = ECORE_LL2_TX_DEST_NW;
	ll2_tx_pkt.ecore_roce_flavor = roce_flavor;
	ll2_tx_pkt.first_frag = pkt->header.baddr;
	ll2_tx_pkt.first_frag_len = pkt->header.len;
	ll2_tx_pkt.cookie = pkt;
	ll2_tx_pkt.enable_ip_cksum = 1; /* only for RoCEv2:IPv4 */

	/* tx header */
	rc = ecore_ll2_prepare_tx_packet(dev->rdma_ctx,
			dev->gsi_ll2_handle,
			&ll2_tx_pkt,
			1);
	if (rc) {
		QL_DPRINT11(dev->ha, "ecore_ll2_prepare_tx_packet failed\n");

		/* TX failed while posting header - release resources */
		qlnx_dma_free_coherent(&dev->ha->cdev,
			pkt->header.vaddr,
			pkt->header.baddr,
			pkt->header.len);

		kfree(pkt);

		return rc;
	}

	/* tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = ecore_ll2_set_fragment_of_tx_packet(dev->rdma_ctx,
						       dev->gsi_ll2_handle,
						       pkt->payload[i].baddr,
						       pkt->payload[i].len);
		if (rc) {
			/* if failed not much to do here, partial packet has
			 * been posted we can't free memory, will need to wait
			 * for completion
			 */
			QL_DPRINT11(dev->ha,
				"ecore_ll2_set_fragment_of_tx_packet failed\n");
			return rc;
		}
	}

	rc = ecore_ll2_get_stats(dev->rdma_ctx, dev->gsi_ll2_handle, &stats);
	if (rc) {
		QL_DPRINT11(dev->ha, "failed to obtain ll2 stats\n");
	}

	QL_DPRINT12(dev->ha, "exit\n");

	return 0;
}

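/*
 * Tear down the GSI LL2 connection in the reverse order of
 * qlnxr_ll2_start(): drop the MAC filter, terminate the connection,
 * then release it.  A handle of 0xFF is the driver's "not acquired"
 * sentinel, so the function is safe to call twice.
 */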
int
qlnxr_ll2_stop(struct qlnxr_dev *dev)
{
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	if (dev->gsi_ll2_handle == 0xFF)
		return 0;

	/* remove LL2 MAC address filter */
	rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx,
			  dev->gsi_ll2_mac_address, NULL);

	rc = ecore_ll2_terminate_connection(dev->rdma_ctx,
			dev->gsi_ll2_handle);

	ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	dev->gsi_ll2_handle = 0xFF;

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return rc;
}

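/*
 * Bring up the light-L2 (LL2) connection that carries GSI traffic:
 * acquire a connection with the callbacks below, establish it, then
 * install a MAC filter for the port's primary MAC.  The error paths
 * unwind in reverse (err2 terminates, err1 releases), mirroring
 * qlnxr_ll2_stop().
 */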
int
qlnxr_ll2_start(struct qlnxr_dev *dev,
		   struct ib_qp_init_attr *attrs,
		   struct qlnxr_qp *qp)
{
	struct ecore_ll2_acquire_data data;
	struct ecore_ll2_cbs cbs;
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	/* configure and start LL2 */
	cbs.rx_comp_cb = qlnxr_ll2_complete_rx_packet;
	cbs.tx_comp_cb = qlnxr_ll2_complete_tx_packet;
	cbs.rx_release_cb = qlnxr_ll2_release_rx_packet;
	cbs.tx_release_cb = qlnxr_ll2_complete_tx_packet;
	cbs.cookie = dev;
	dev->gsi_ll2_handle = 0xFF;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = ECORE_LL2_TYPE_ROCE;
	data.input.mtu = dev->ha->ifp->if_mtu;
	data.input.rx_num_desc = 8 * 1024;
	data.input.rx_drop_ttl0_flg = 1;
	data.input.rx_vlan_removal_en = 0;
	data.input.tx_num_desc = 8 * 1024;
	data.input.tx_tc = 0;
	data.input.tx_dest = ECORE_LL2_TX_DEST_NW;
	data.input.ai_err_packet_too_big = ECORE_LL2_DROP_PACKET;
	data.input.ai_err_no_buf = ECORE_LL2_DROP_PACKET;
	data.input.gsi_enable = 1;
	data.p_connection_handle = &dev->gsi_ll2_handle;
	data.cbs = &cbs;

	rc = ecore_ll2_acquire_connection(dev->rdma_ctx, &data);

	if (rc) {
		QL_DPRINT11(dev->ha,
			"ecore_ll2_acquire_connection failed: %d\n",
			rc);
		return rc;
	}

	QL_DPRINT11(dev->ha,
		"ll2 connection acquired successfully\n");
	rc = ecore_ll2_establish_connection(dev->rdma_ctx,
		dev->gsi_ll2_handle);

	if (rc) {
		QL_DPRINT11(dev->ha,
			"ecore_ll2_establish_connection failed: %d\n", rc);
		goto err1;
	}

	QL_DPRINT11(dev->ha,
		"ll2 connection established successfully\n");
	rc = qlnx_rdma_ll2_set_mac_filter(dev->rdma_ctx, NULL,
			dev->ha->primary_mac);
	if (rc) {
		QL_DPRINT11(dev->ha,
			"qlnx_rdma_ll2_set_mac_filter failed: %d\n", rc);
		goto err2;
	}

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return 0;

err2:
	ecore_ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
	ecore_ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return rc;
}

struct ib_qp*
qlnxr_create_gsi_qp(struct qlnxr_dev *dev,
		 struct ib_qp_init_attr *attrs,
		 struct qlnxr_qp *qp)
{
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	rc = qlnxr_check_gsi_qp_attrs(dev, attrs);

	if (rc) {
		QL_DPRINT11(dev->ha, "qlnxr_check_gsi_qp_attrs failed\n");
		return ERR_PTR(rc);
	}

	rc = qlnxr_ll2_start(dev, attrs, qp);
	if (rc) {
		QL_DPRINT11(dev->ha, "qlnxr_ll2_start failed\n");
		return ERR_PTR(rc);
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id) {
		QL_DPRINT11(dev->ha, "(!qp->rqe_wr_id)\n");
		goto err;
	}

	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id) {
		QL_DPRINT11(dev->ha, "(!qp->wqe_wr_id)\n");
		goto err;
	}

	qlnxr_store_gsi_qp_cq(dev, qp, attrs);
	memcpy(dev->gsi_ll2_mac_address, dev->ha->primary_mac, ETH_ALEN);

	/* the GSI CQs are handled by the driver so remove them from the FW */
	qlnxr_destroy_gsi_cq(dev, attrs);
	dev->gsi_rqcq->cq_type = QLNXR_CQ_TYPE_GSI;
	dev->gsi_sqcq->cq_type = QLNXR_CQ_TYPE_GSI;

	QL_DPRINT12(dev->ha, "exit &qp->ibqp = %p\n", &qp->ibqp);

	return &qp->ibqp;
err:
	kfree(qp->rqe_wr_id);

	rc = qlnxr_ll2_stop(dev);

	QL_DPRINT12(dev->ha, "exit with error\n");

	return ERR_PTR(-ENOMEM);
}

int
qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev)
{
	int rc = 0;

	QL_DPRINT12(dev->ha, "enter\n");

	rc = qlnxr_ll2_stop(dev);

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return (rc);
}

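/*
 * RoCE encodes an optional VLAN ID in bytes 11-12 of the destination
 * GID.  VLAN IDs are 12 bits wide, so any decoded value >= 0x1000
 * means "no VLAN".  E.g. a dgid with raw[11] = 0x00 and raw[12] = 0x64
 * yields VLAN 100, while raw[11] = 0xff gives 0xff00 | raw[12], which
 * fails the < 0x1000 test and is treated as untagged.
 */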
static inline bool
qlnxr_get_vlan_id_gsi(struct ib_ah_attr *ah_attr, u16 *vlan_id)
{
	u16 tmp_vlan_id;
	union ib_gid *dgid = &ah_attr->grh.dgid;

	tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
	if (tmp_vlan_id < 0x1000) {
		*vlan_id = tmp_vlan_id;
		return true;
	} else {
		*vlan_id = 0;
		return false;
	}
}

#define QLNXR_MAX_UD_HEADER_SIZE	(100)
#define QLNXR_GSI_QPN		(1)
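/*
 * Build the on-wire UD header for a GSI send.  For the RoCEv1-only
 * path below the layout is:
 *
 *	ETH [+ 802.1Q VLAN] | GRH (IPv6, next_header 0x1b) | BTH | DETH
 *
 * followed by the caller's payload SGEs.  The BTH PSN is masked to
 * 24 bits, and the DETH carries the well-known GSI Q_Key 0x80010000
 * and source QPN 1.
 */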
static inline int
qlnxr_gsi_build_header(struct qlnxr_dev *dev,
		struct qlnxr_qp *qp,
		struct ib_send_wr *swr,
		struct ib_ud_header *udh,
		int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct ib_ah_attr *ah_attr = &get_qlnxr_ah((ud_wr(swr)->ah))->attr;
	struct ib_global_route *grh = &ah_attr->grh;
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;

#if __FreeBSD_version >= 1102000
	int rc = 0;
	int ip_ver = 0;
	bool has_udp = false;
#endif /* #if __FreeBSD_version >= 1102000 */

#if !DEFINE_IB_AH_ATTR_WITH_DMAC
	u8 mac[ETH_ALEN];
#endif
	int i;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	has_vlan = qlnxr_get_vlan_id_gsi(ah_attr, &vlan_id);
	ether_type = ETH_P_ROCE;
	*roce_mode = ROCE_V1;
	if (grh->sgid_index < QLNXR_MAX_SGID)
		sgid = dev->sgid_tbl[grh->sgid_index];
	else
		sgid = dev->sgid_tbl[0];

#if __FreeBSD_version >= 1102000

	rc = ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
			has_vlan, has_grh_ipv6, ip_ver, has_udp,
			0 /* immediate */, udh);

	if (rc) {
		QL_DPRINT11(dev->ha, "gsi post send: failed to init header\n");
		return rc;
	}

#else
	ib_ud_header_init(send_size, false /* LRH */, true /* ETH */,
			  has_vlan, has_grh_ipv6, 0 /* immediate */, udh);

#endif /* #if __FreeBSD_version >= 1102000 */

	/* ENET + VLAN headers */
#if DEFINE_IB_AH_ATTR_WITH_DMAC
	memcpy(udh->eth.dmac_h, ah_attr->dmac, ETH_ALEN);
#else
	qlnxr_get_dmac(dev, ah_attr, mac);
	memcpy(udh->eth.dmac_h, mac, ETH_ALEN);
#endif
	memcpy(udh->eth.smac_h, dev->ha->primary_mac, ETH_ALEN);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	for (i = 0; i < ETH_ALEN; i++) {
		QL_DPRINT12(dev->ha, "destination mac: %x\n",
				udh->eth.dmac_h[i]);
	}
	for (i = 0; i < ETH_ALEN; i++) {
		QL_DPRINT12(dev->ha, "source mac: %x\n",
				udh->eth.smac_h[i]);
	}

	QL_DPRINT12(dev->ha, "QP: %p, opcode: %d, wq: %lx, roce: %x, hops: %d, "
			"imm: %d, vlan: %d, AH: %p\n",
			qp, swr->opcode, swr->wr_id, *roce_mode, grh->hop_limit,
			0, has_vlan, get_qlnxr_ah((ud_wr(swr)->ah)));

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
		QL_DPRINT12(dev->ha, "header: tc: %x, flow_label: %x, "
			"hop_limit: %x\n", udh->grh.traffic_class,
			udh->grh.flow_label, udh->grh.hop_limit);
		for (i = 0; i < 16; i++) {
			QL_DPRINT12(dev->ha, "udh dgid = %x\n",
					udh->grh.destination_gid.raw[i]);
		}
		for (i = 0; i < 16; i++) {
			QL_DPRINT12(dev->ha, "udh sgid = %x\n",
					udh->grh.source_gid.raw[i]);
		}
		udh->grh.next_header = 0x1b;
	}
#ifdef DEFINE_IB_UD_HEADER_INIT_UDP_PRESENT
	/* This is for RoCEv2 */
	else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}
#endif

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QLNXR_ROCE_PKEY_DEFAULT; /* TODO: ib_get_cached_pkey?! */
	udh->bth.destination_qpn = OSAL_CPU_TO_BE32(ud_wr(swr)->remote_qpn);
	udh->bth.psn = OSAL_CPU_TO_BE32((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = OSAL_CPU_TO_BE32(0x80010000); /* well-known GSI Q_Key; TODO: use qp->qkey? */
	udh->deth.source_qpn = OSAL_CPU_TO_BE32(QLNXR_GSI_QPN);
	QL_DPRINT12(dev->ha, "exit\n");
	return 0;
}

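/*
 * Assemble an LL2 packet for one send WR: pack the UD header built by
 * qlnxr_gsi_build_header() into a freshly DMA-allocated buffer and
 * reference the caller's SGEs as payload fragments.  A packet whose
 * destination MAC equals our source MAC is looped back internally
 * (ECORE_ROCE_LL2_TX_DEST_LB) instead of going out on the wire.
 */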
static inline int
qlnxr_gsi_build_packet(struct qlnxr_dev *dev,
	struct qlnxr_qp *qp, struct ib_send_wr *swr,
	struct ecore_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QLNXR_MAX_UD_HEADER_SIZE];
	struct ecore_roce_ll2_packet *packet;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	QL_DPRINT12(dev->ha, "enter\n");

	*p_packet = NULL;

	rc = qlnxr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc) {
		QL_DPRINT11(dev->ha,
			"qlnxr_gsi_build_header failed rc = %d\n", rc);
		return rc;
	}

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet) {
		QL_DPRINT11(dev->ha, "packet == NULL\n");
		return -ENOMEM;
	}

	packet->header.vaddr = qlnx_dma_alloc_coherent(&dev->ha->cdev,
					&packet->header.baddr,
					header_size);
	if (!packet->header.vaddr) {
		QL_DPRINT11(dev->ha, "packet->header.vaddr == NULL\n");
		kfree(packet);
		return -ENOMEM;
	}

	if (memcmp(udh.eth.smac_h, udh.eth.dmac_h, ETH_ALEN))
		packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_NW;
	else
		packet->tx_dest = ECORE_ROCE_LL2_TX_DEST_LB;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	qp->wqe_wr_id[qp->sq.prod].bytes_len = IB_GRH_BYTES; /* RDMA_GRH_BYTES */
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
		qp->wqe_wr_id[qp->sq.prod].bytes_len +=
			packet->payload[i].len;
		QL_DPRINT11(dev->ha, "baddr: %p, len: %d\n",
				packet->payload[i].baddr,
				packet->payload[i].len);
	}

	*p_packet = packet;

	QL_DPRINT12(dev->ha, "exit, packet->n_seg: %d\n", packet->n_seg);
	return 0;
}

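/*
 * ib_post_send() entry point for the GSI QP.  Only a single IB_WR_SEND
 * work request per call is supported; a chained wr->next is reported
 * through *bad_wr with -EINVAL.  A minimal caller sketch (hypothetical
 * names, assuming an AH and a DMA-mapped, MR-registered buffer already
 * exist):
 *
 *	struct ib_sge sge = { .addr = buf_dma, .length = len,
 *			      .lkey = mr->lkey };
 *	struct ib_ud_wr uwr = { .wr = { .opcode = IB_WR_SEND,
 *					.sg_list = &sge, .num_sge = 1,
 *					.send_flags = IB_SEND_SIGNALED },
 *				.ah = ah, .remote_qpn = 1,
 *				.remote_qkey = 0x80010000 };
 *	struct ib_send_wr *bad_wr;
 *	rc = ib_post_send(gsi_qp, &uwr.wr, &bad_wr);
 */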
int
qlnxr_gsi_post_send(struct ib_qp *ibqp,
		struct ib_send_wr *wr,
		struct ib_send_wr **bad_wr)
{
	struct ecore_roce_ll2_packet *pkt = NULL;
	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
	struct qlnxr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	QL_DPRINT12(dev->ha, "enter\n");

	if (qp->state != ECORE_ROCE_QP_STATE_RTS) {
		QL_DPRINT11(dev->ha,
			"(qp->state != ECORE_ROCE_QP_STATE_RTS)\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		QL_DPRINT11(dev->ha,
			"(wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE)\n");
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		QL_DPRINT11(dev->ha, "(wr->opcode != IB_WR_SEND)\n");
		rc = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qlnxr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		QL_DPRINT11(dev->ha, "qlnxr_gsi_build_packet failed\n");
		goto err;
	}

	rc = qlnxr_ll2_post_tx(dev, pkt);

	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qp->wqe_wr_id[qp->sq.prod].signaled =
			!!(wr->send_flags & IB_SEND_SIGNALED);
		qp->wqe_wr_id[qp->sq.prod].opcode = IB_WC_SEND;
		qlnxr_inc_sw_prod(&qp->sq);
		QL_DPRINT11(dev->ha, "packet sent over gsi qp\n");
	} else {
		QL_DPRINT11(dev->ha, "qlnxr_ll2_post_tx failed\n");
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next != NULL) {
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	QL_DPRINT12(dev->ha, "exit\n");
	return rc;

err:
	*bad_wr = wr;
	QL_DPRINT12(dev->ha, "exit error\n");
	return rc;
}

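/*
 * ib_post_recv() entry point for the GSI QP.  Each WR contributes
 * exactly one LL2 RX buffer (sg_list[0]); the WR id and SGE are
 * shadowed in rqe_wr_id[] at the software producer index so that
 * qlnxr_gsi_poll_cq() can hand them back when the matching LL2
 * completion arrives.
 */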
#define	QLNXR_LL2_RX_BUFFER_SIZE	(4 * 1024)
int
qlnxr_gsi_post_recv(struct ib_qp *ibqp,
		struct ib_recv_wr *wr,
		struct ib_recv_wr **bad_wr)
{
	struct qlnxr_dev *dev = get_qlnxr_dev((ibqp->device));
	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	QL_DPRINT12(dev->ha, "enter, wr: %p\n", wr);

	if ((qp->state != ECORE_ROCE_QP_STATE_RTR) &&
	    (qp->state != ECORE_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		QL_DPRINT11(dev->ha, "exit 0\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QLNXR_GSI_MAX_RECV_SGE) {
			QL_DPRINT11(dev->ha, "exit 1\n");
			goto err;
		}

		rc = ecore_ll2_post_rx_buffer(dev->rdma_ctx,
				dev->gsi_ll2_handle,
				wr->sg_list[0].addr,
				wr->sg_list[0].length,
				0 /* cookie */,
				1 /* notify_fw */);
		if (rc) {
			QL_DPRINT11(dev->ha, "exit 2\n");
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
			sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qlnxr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	QL_DPRINT12(dev->ha, "exit rc = %d\n", rc);
	return rc;
err:

	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;

	QL_DPRINT12(dev->ha, "exit with -ENOMEM\n");
	return -ENOMEM;
}

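/*
 * Software completion polling for the GSI QP: drain receive
 * completions first (rq.cons up to rq.gsi_cons), then send
 * completions (sq.cons up to sq.gsi_cons), copying at most
 * num_entries work completions into wc[].  A consumer loop sketch
 * (hypothetical helper, standard verbs usage):
 *
 *	struct ib_wc wc[8];
 *	int n, k;
 *
 *	while ((n = ib_poll_cq(cq, nitems(wc), wc)) > 0)
 *		for (k = 0; k < n; k++)
 *			handle_wc(&wc[k]);	// hypothetical
 */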
int
qlnxr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
	struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
	struct qlnxr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	int i = 0;

	QL_DPRINT12(dev->ha, "enter\n");

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
			       IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;

#if __FreeBSD_version >= 1100000
		memcpy(&wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac, ETH_ALEN);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;

		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
		}

#endif
		qlnxr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qlnxr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	QL_DPRINT12(dev->ha, "exit i = %d\n", i);
	return i;
}