/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
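
/* Lock-ordering note: the send CQ flush_lock is always taken first and the
 * receive CQ flush_lock second, so the add/del/clean paths built on these
 * helpers cannot deadlock against each other. When a single CQ backs both
 * SQ and RQ, the __acquire()/__release() annotations keep sparse's lock
 * balance intact without taking the same spinlock twice.
 */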

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s: Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}
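
/* The atomic arm_state acts as the CQ's armed flag: bnxt_qplib_service_nq()
 * clears it under compl_lock before invoking the notification handler, so
 * the scheduled work above skips the callback entirely once the CQ is no
 * longer armed and the consumer has not yet requested the next event.
 */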

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
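
/* With retry_cnt = 100 and usleep_range(50, 100), the wait above is bounded
 * to roughly 5-10 ms of sleep time (plus whatever clean_nq() itself takes)
 * before giving up, so CQ teardown never blocks indefinitely on a stale
 * notification.
 */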

static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, cq))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq, srq, nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function polls all pending entries of a given notification queue.
 * It is useful for synchronizing notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}
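
/* Deferred-arming protocol: if the caller requests a limit (threshold) while
 * fewer than that many receive buffers are available, arming the SRQ now
 * would fire the low-watermark event immediately. Instead arm_req is latched
 * and bnxt_qplib_post_srq_recv() arms the SRQ once enough buffers have been
 * replenished.
 */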

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}
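
/* Note: the SWQ entries are chained into a circular list through next_idx,
 * with swq_start and swq_last acting as the head and tail cursors of the
 * outstanding window. This keeps wr_id bookkeeping O(1) even though WQEs of
 * variable slot size occupy the underlying hardware ring.
 */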

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto rq_rwq;
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}
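
/* The PSN/MSN search entries live in the aux pages allocated alongside the
 * SQ ring. When the first entry does not start on a page boundary,
 * pad_pgofft records how many stride-sized entries into the page it begins,
 * so bnxt_qplib_pull_psn_buff() can index entries page by page from pad_pg.
 */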

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	if (res->dattr)
		qp->dev_cap_flags = res->dattr->dev_cap_flags;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	/* Update msn tbl size */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}
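
/* qp->msn is a running producer index into the MSN table; it wraps at
 * msn_tbl_sz, which bnxt_qplib_create_qp() rounded up to a power of two of
 * the SQ size when HW retransmission is enabled, so each posted wire WQE
 * lands in its own table row.
 */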

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle the MSN table differently based on cap flags */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}
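
/* Inline data is packed straight into consecutive SQ slots, each the size
 * of one struct sq_sge (16 bytes): a fresh slot is pulled whenever the
 * current one fills, and the running total is capped at qp->max_inline_data.
 * The return value is the byte count copied, or -ENOMEM once the cap would
 * be exceeded.
 */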

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Although named sq_send_hdr, the RQ WQE header is the same size. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}
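
/* Slot math, assuming a 32-byte sq_send_hdr (the base + extended header
 * slots consumed in bnxt_qplib_post_send()) and the 16-byte sq_sge: a send
 * WQE with two SGEs needs 32 + 2 * 16 = 64 bytes, i.e. four 16-byte slots
 * (bytes >> 4). In static WQE mode every WQE is padded out to a fixed eight
 * slots (128 bytes) regardless of the computed size.
 */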

static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}
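
/* psn_ext and psn_search alias the same pad-area buffer located above; the
 * writer side (bnxt_qplib_fill_psn_search()) then picks the extended,
 * legacy, or MSN layout based on the chip generation and the
 * HW-retransmission capability.
 */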
1775
1776void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1777{
1778	struct bnxt_qplib_q *sq = &qp->sq;
1779
1780	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1781}
1782
1783int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1784			 struct bnxt_qplib_swqe *wqe)
1785{
1786	struct bnxt_qplib_nq_work *nq_work = NULL;
1787	int i, rc = 0, data_len = 0, pkt_num = 0;
1788	struct bnxt_qplib_q *sq = &qp->sq;
1789	struct bnxt_qplib_hwq *hwq;
1790	struct bnxt_qplib_swq *swq;
1791	bool sch_handler = false;
1792	u16 wqe_sz, qdf = 0;
1793	bool msn_update;
1794	void *base_hdr;
1795	void *ext_hdr;
1796	__le32 temp32;
1797	u32 wqe_idx;
1798	u32 slots;
1799	u16 idx;
1800
1801	hwq = &sq->hwq;
1802	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1803	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1804		dev_err(&hwq->pdev->dev,
1805			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
1806			qp->id, qp->state);
1807		rc = -EINVAL;
1808		goto done;
1809	}
1810
1811	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1812	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1813		dev_err(&hwq->pdev->dev,
1814			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1815			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1816		rc = -ENOMEM;
1817		goto done;
1818	}
1819
1820	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1821	bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
1822
	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Make sure we update the MSN table only for wired WQEs */
	msn_update = true;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
			msn_update = false;
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		msn_update = false;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
		msn_update = false;

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		msn_update = false;
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
		swq->next_psn = sq->psn & BTH_PSN_MASK;
		bnxt_qplib_fill_psn_search(qp, wqe, swq);
	}
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

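/* Build and enqueue one receive WQE on the RQ. As with the SQ path, the
 * caller rings the RQ doorbell via bnxt_qplib_post_recv_db() after
 * posting; for a QP in the error state the WQE is still queued and a
 * completion handler is scheduled so it can be flushed via poll_cq.
 */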
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* CQ */
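/* Allocate the CQ hardware queue, issue CREATE_CQ to the firmware and
 * initialize the doorbell info and SQ/RQ flush lists. On firmware
 * failure the hardware queue is freed before returning.
 */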
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	cq->dbinfo.flags = 0;
	cq->dbinfo.toggle = 0;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}

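/* Swap in the resized hardware queue once the resize has completed:
 * retire the old queue, adopt cq->resize_hwq and clear only the
 * consumer epoch bit so polling restarts cleanly on the new queue.
 */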
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}

int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
				       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

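/* Complete all outstanding SQEs with FLUSHED_ERR, skipping FENCE WQE
 * completions. Returns -EAGAIN when the caller's CQE budget runs out
 * before the SQ has been fully drained.
 */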
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

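/* Complete all outstanding RQEs with FLUSHED_ERR, using the CQE opcode
 * that matches the QP's transport type. Returns -EAGAIN if the budget
 * is exhausted before the RQ is drained.
 */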
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 *       CQEs are tracked from sw_cq_cons to max_element but are valid
 *       only if VALID = 1.
 */
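/* Workaround 9060: when the SWQE's psn_search entry carries the marker
 * bit, hold off completing it and instead peek ahead in the CQ for the
 * phantom (FENCE) completion. Returns -EAGAIN to pause processing while
 * waiting, 0 once the phantom CQE shows up, and -EINVAL if peeking must
 * stop until more CQEs arrive.
 */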
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

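/* Process a REQ CQE. The hardware may aggregate completions, so walk
 * the SQ's swq from the current consumer up to the CQE's sq_cons_idx
 * and fabricate a CQE for each signaled SWQE. An error status completes
 * the last WQE regardless of signaling and moves the QP to the flush
 * list.
 */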
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* Need to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

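/* Return the consumed SRQ element at @tag to the tail of the software
 * free chain and advance the SRQ consumer, under the SRQ lock.
 */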
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}

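/* Process an RC responder CQE. The wr_id index selects either an SRQ
 * element (released back to the SRQ) or the next RQ SWQE; an error
 * status moves the QP to the error state and onto the flush list.
 */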
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

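/* Process a UD responder CQE: same flow as the RC variant, but also
 * recovers the source QP, source MAC and CFA metadata carried in the
 * CQE.
 */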
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

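/* Non-destructive check: peek at the current consumer entry and report
 * whether a valid CQE is waiting.
 */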
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

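/* Process a TERMINAL CQE: block further posting by moving the QP to the
 * error state, complete any aggregated successful SQ completions up to
 * the reported sq_cons_idx, then put the QP on the flush list so the
 * remaining RQEs complete with FLUSHED_ERR.
 */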
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod, regardless of the
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

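/* Generate flush completions for every QP parked on this CQ's SQ and RQ
 * flush lists, bounded by num_cqes; returns the number of CQEs filled.
 */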
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

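/* Main CQ poll loop: consume valid CQEs up to num_cqes, dispatching
 * each to its type-specific handler, then ring the CQ doorbell for
 * everything polled. Returns the number of CQEs written to the caller's
 * array.
 */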
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;

	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}