Lines matching refs:qp in drivers/infiniband/hw/cxgb4/device.c

245 static int dump_qp(unsigned long id, struct c4iw_qp *qp,
250 if (id != qp->wq.sq.qid)
257 if (qp->ep) {
258 struct c4iw_ep *ep = qp->ep;
268 "rc qp sq id %u %s id %u state %u "
271 qp->wq.sq.qid, qp->srq ? "srq" : "rq",
272 qp->srq ? qp->srq->idx : qp->wq.rq.qid,
273 (int)qp->attr.state,
274 qp->wq.sq.flags & T4_SQ_ONCHIP,
289 "rc qp sq id %u rq id %u state %u "
292 qp->wq.sq.qid, qp->wq.rq.qid,
293 (int)qp->attr.state,
294 qp->wq.sq.flags & T4_SQ_ONCHIP,
305 "qp sq id %u rq id %u state %u onchip %u\n",
306 qp->wq.sq.qid, qp->wq.rq.qid,
307 (int)qp->attr.state,
308 qp->wq.sq.flags & T4_SQ_ONCHIP);
328 struct c4iw_qp *qp;
344 xa_for_each(&qpd->devp->qps, index, qp)
355 xa_for_each(&qpd->devp->qps, index, qp)
356 dump_qp(index, qp, qpd);
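
Lines 245-356 are the debugfs QP dumper: dump_qp() formats one line per QP, picking the SRQ, RQ, or bare layout, and the early return at line 250 apparently skips the RQ-qid alias so each QP prints once; the open handler then walks the device's qps xarray twice, once to size the buffer and once to emit entries. A minimal sketch of the second pass, assuming the driver's c4iw_debugfs_data bookkeeping (devp, buf, bufsize, pos) and eliding the sizing pass:

	/* Second pass of the debugfs dump: format each QP into the
	 * preallocated buffer; pos only advances on a full write. */
	static int dump_all_qps(struct c4iw_debugfs_data *qpd)
	{
		struct c4iw_qp *qp;
		unsigned long index;
		int cc;

		xa_for_each(&qpd->devp->qps, index, qp) {
			cc = snprintf(qpd->buf + qpd->pos,
				      qpd->bufsize - qpd->pos,
				      "qp sq id %u rq id %u state %u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state);
			if (cc < qpd->bufsize - qpd->pos)
				qpd->pos += cc;
		}
		return 0;
	}
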
575 "ep %p cm_id %p qp %p state %d flags 0x%lx "
579 ep, ep->com.cm_id, ep->com.qp,
596 "ep %p cm_id %p qp %p state %d flags 0x%lx "
600 ep, ep->com.cm_id, ep->com.qp,
802 if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
803 rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
804 pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
805 pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
806 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
823 pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
829 rdev->lldi.vr->qp.start,
830 rdev->lldi.vr->qp.size,
848 rdev->stats.qid.total = rdev->lldi.vr->qp.size;
877 rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
878 rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
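
Lines 802-878 belong to c4iw_rdev setup: the driver refuses to attach unless the QP and CQ qid ranges coincide, presumably because it carves both out of one qid allocator (line 848 sizes rdev->stats.qid.total from qp.size alone), and it then exports the range to userspace through the status page. A hedged consolidation of the check; the -EINVAL return is an assumption, since the grep omits the continuation lines that don't mention qp:

	static int check_qid_ranges(struct c4iw_rdev *rdev)
	{
		/* QP and CQ qids must share one range for the common allocator. */
		if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
		    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
			pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
			       pci_name(rdev->lldi.pdev),
			       rdev->lldi.vr->qp.start, rdev->lldi.vr->qp.size,
			       rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size);
			return -EINVAL;
		}
		return 0;
	}
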
964 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
1012 * For T4 devices with onchip qp mem, we map only that part
1270 struct c4iw_qp *qp;
1277 xa_for_each(&ctx->dev->qps, index, qp)
1278 t4_disable_wq_db(&qp->wq);
1285 static void resume_rc_qp(struct c4iw_qp *qp)
1287 spin_lock(&qp->lock);
1288 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
1289 qp->wq.sq.wq_pidx_inc = 0;
1290 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
1291 qp->wq.rq.wq_pidx_inc = 0;
1292 spin_unlock(&qp->lock);
1298 struct c4iw_qp *qp;
1301 qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
1303 list_del_init(&qp->db_fc_entry);
1304 resume_rc_qp(qp);
1318 struct c4iw_qp *qp;
1325 xa_for_each(&ctx->dev->qps, index, qp)
1326 t4_enable_wq_db(&qp->wq);
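
Lines 1270-1326 are the doorbell flow-control pair: on a DB_FULL event the stop path disables every work queue's doorbell, and on DB_EMPTY the resume path first drains db_fc_list, re-ringing each stalled RC QP's accumulated producer-index increments under qp->lock (resume_rc_qp, lines 1285-1292), then re-enables doorbells for all QPs. A simplified sketch of the drain, assuming, as the surrounding lock pairing suggests, that the caller holds the device's qps xarray lock; the real loop also throttles against the hardware doorbell FIFO:

	static void resume_flow_controlled_qps(struct c4iw_dev *dev)
	{
		struct c4iw_qp *qp;
		unsigned long index;

		/* Drain QPs that stalled while doorbells were disabled. */
		while (!list_empty(&dev->db_fc_list)) {
			qp = list_first_entry(&dev->db_fc_list, struct c4iw_qp,
					      db_fc_entry);
			list_del_init(&qp->db_fc_entry);
			resume_rc_qp(qp);	/* rings SQ and RQ doorbells */
		}
		/* Backlog gone: let every QP ring its own doorbells again. */
		xa_for_each(&dev->qps, index, qp)
			t4_enable_wq_db(&qp->wq);
	}
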
1374 struct c4iw_qp *qp = qp_list->qps[idx];
1376 xa_lock_irq(&qp->rhp->qps);
1377 spin_lock(&qp->lock);
1378 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1379 qp->wq.sq.qid,
1380 t4_sq_host_wq_pidx(&qp->wq),
1381 t4_sq_wq_size(&qp->wq));
1384 pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
1385 spin_unlock(&qp->lock);
1386 xa_unlock_irq(&qp->rhp->qps);
1389 qp->wq.sq.wq_pidx_inc = 0;
1391 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1392 qp->wq.rq.qid,
1393 t4_rq_host_wq_pidx(&qp->wq),
1394 t4_rq_wq_size(&qp->wq));
1398 pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
1399 spin_unlock(&qp->lock);
1400 xa_unlock_irq(&qp->rhp->qps);
1403 qp->wq.rq.wq_pidx_inc = 0;
1404 spin_unlock(&qp->lock);
1405 xa_unlock_irq(&qp->rhp->qps);
1408 while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
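
Lines 1374-1408 are the per-QP doorbell recovery: after the hardware dropped doorbells, cxgb4_sync_txq_pidx() replays the host-side producer index for the SQ and then the RQ, with qp->lock nested inside the qps xarray lock, and each failure path unlocks in reverse order of acquisition and aborts. A condensed sketch of one queue's replay; the real function keeps both locks held across the SQ and RQ replays rather than per queue:

	/* Replay the software SQ producer index into hardware; returns
	 * the cxgb4_sync_txq_pidx() result so the caller can abort. */
	static int sync_sq_pidx(struct c4iw_qp *qp)
	{
		int ret;

		xa_lock_irq(&qp->rhp->qps);	/* pin the QP in the xarray */
		spin_lock(&qp->lock);		/* freeze the pidx counters */
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (!ret)
			qp->wq.sq.wq_pidx_inc = 0;	/* increments applied */
		spin_unlock(&qp->lock);
		xa_unlock_irq(&qp->rhp->qps);
		return ret;
	}
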
1417 struct c4iw_qp *qp;
1439 xa_for_each(&ctx->dev->qps, index, qp)
1449 /* add and ref each qp so it doesn't get freed */
1450 xa_for_each(&ctx->dev->qps, index, qp) {
1451 c4iw_qp_add_ref(&qp->ibqp);
1452 qp_list.qps[qp_list.idx++] = qp;
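
The last hits are recover_queues() snapshotting every QP before recovery: each QP is referenced as it is stashed (see the comment at line 1449) so it cannot be freed once the xarray lock is dropped. A sketch of the count-allocate-ref pattern using the qp_list fields shown above (qps, idx); GFP_ATOMIC is an assumption, on the grounds that the snapshot runs under the irq-disabled xarray lock:

	struct qp_list {
		unsigned idx;
		struct c4iw_qp **qps;
	};

	static int snapshot_qps(struct c4iw_dev *dev, struct qp_list *qp_list)
	{
		struct c4iw_qp *qp;
		unsigned long index;
		unsigned count = 0;

		xa_for_each(&dev->qps, index, qp)	/* pass 1: size the array */
			count++;
		qp_list->qps = kcalloc(count, sizeof(*qp_list->qps), GFP_ATOMIC);
		if (!qp_list->qps)
			return -ENOMEM;
		qp_list->idx = 0;

		/* Pass 2: take a reference and stash each QP. */
		xa_for_each(&dev->qps, index, qp) {
			c4iw_qp_add_ref(&qp->ibqp);
			qp_list->qps[qp_list->idx++] = qp;
		}
		return 0;
	}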