Search scope: /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/infiniband/hw/cxgb4/

Lines matching refs:qhp (cross-reference hits for the queue-pair handle qhp in the cxgb4 Chelsio T4 iWARP RDMA driver)

441 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
446 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
447 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
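
The two hits at lines 446-447 pass build_isgl() both the base of the RQ ring and its one-past-the-end address, so scatter/gather entries can be written with wrap-around inside the ring. A minimal sketch of that wrap idiom follows; the helper name and signature are hypothetical, not the driver's actual build_isgl():

    #include <linux/types.h>        /* __be64, u64 */
    #include <asm/byteorder.h>      /* cpu_to_be64 */

    /* Hypothetical helper: store one big-endian word into a ring,
     * wrapping from the one-past-the-end pointer back to the base,
     * as the (queue, queue_end) pair at lines 446-447 suggests. */
    static __be64 *write_wrapped(__be64 *queue_start, __be64 *queue_end,
                                 __be64 *cursor, u64 value)
    {
        *cursor++ = cpu_to_be64(value);
        if (cursor == queue_end)        /* stepped past the last slot */
            cursor = queue_start;       /* wrap back to the ring base */
        return cursor;
    }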
535 struct c4iw_qp *qhp;
542 qhp = to_c4iw_qp(ibqp);
543 spin_lock_irqsave(&qhp->lock, flag);
544 if (t4_wq_in_error(&qhp->wq)) {
545 spin_unlock_irqrestore(&qhp->lock, flag);
548 num_wrs = t4_sq_avail(&qhp->wq);
550 spin_unlock_irqrestore(&qhp->lock, flag);
559 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
560 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
567 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
578 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
583 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
597 if (!qhp->wq.sq.oldest_read)
598 qhp->wq.sq.oldest_read = swsqe;
621 swsqe->idx = qhp->wq.sq.pidx;
626 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
629 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
633 t4_sq_produce(&qhp->wq, len16);
636 if (t4_wq_db_enabled(&qhp->wq))
637 t4_ring_sq_db(&qhp->wq, idx);
638 spin_unlock_irqrestore(&qhp->lock, flag);
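
Lines 543-638 outline the send-path locking pattern: take qhp->lock, bail out if the work queue is already marked in error, reserve up to t4_sq_avail() slots, build each work request at the software-queue producer index, advance it with t4_sq_produce(), and ring the doorbell only while doorbells are enabled, all before dropping the lock. A condensed sketch of that shape (WR building and error paths elided; the errno is an assumption, since the matched lines do not show return values):

    spin_lock_irqsave(&qhp->lock, flag);
    if (t4_wq_in_error(&qhp->wq)) {             /* QP already dead */
        spin_unlock_irqrestore(&qhp->lock, flag);
        return -EINVAL;                         /* assumed errno */
    }
    num_wrs = t4_sq_avail(&qhp->wq);            /* free SQ slots */
    while (wr && num_wrs) {
        /* ... build one WR at qhp->wq.sq.pidx (lines 559-629) ... */
        t4_sq_produce(&qhp->wq, len16);         /* advance producer */
        wr = wr->next;
        num_wrs--;
    }
    if (t4_wq_db_enabled(&qhp->wq))
        t4_ring_sq_db(&qhp->wq, idx);           /* kick the hardware */
    spin_unlock_irqrestore(&qhp->lock, flag);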
646 struct c4iw_qp *qhp;
653 qhp = to_c4iw_qp(ibqp);
654 spin_lock_irqsave(&qhp->lock, flag);
655 if (t4_wq_in_error(&qhp->wq)) {
656 spin_unlock_irqrestore(&qhp->lock, flag);
659 num_wrs = t4_rq_avail(&qhp->wq);
661 spin_unlock_irqrestore(&qhp->lock, flag);
670 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
671 qhp->wq.rq.wq_pidx *
674 err = build_rdma_recv(qhp, wqe, wr, &len16);
682 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
686 wqe->recv.wrid = qhp->wq.rq.pidx;
692 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
693 t4_rq_produce(&qhp->wq, len16);
698 if (t4_wq_db_enabled(&qhp->wq))
699 t4_ring_rq_db(&qhp->wq, idx);
700 spin_unlock_irqrestore(&qhp->lock, flag);
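
The receive path at lines 653-700 mirrors the send path above: the same lock, in-error check, t4_rq_avail() reservation, t4_rq_produce() advance, and doorbell gate. The differences visible here are that it stores wr_id in the software RQ (line 682) and records the RQ producer index in wqe->recv.wrid (line 686) so completions can be matched back to the posted request.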
845 int c4iw_post_zb_read(struct c4iw_qp *qhp)
857 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
872 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
875 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
882 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
883 qhp->ep->hwtid);
888 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
894 FW_WR_FLOWID(qhp->ep->hwtid) |
901 c4iw_ofld_send(&qhp->rhp->rdev, skb);
905 * Assumes qhp lock is held.
907 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
913 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
914 /* take a ref on the qhp since we must release the lock */
915 atomic_inc(&qhp->refcnt);
916 spin_unlock_irqrestore(&qhp->lock, *flag);
920 spin_lock(&qhp->lock);
922 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
923 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
924 spin_unlock(&qhp->lock);
931 spin_lock(&qhp->lock);
933 c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
934 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
935 spin_unlock(&qhp->lock);
941 if (atomic_dec_and_test(&qhp->refcnt))
942 wake_up(&qhp->wait);
944 spin_lock_irqsave(&qhp->lock, *flag);
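
__flush_qp() (lines 907-944) has to lock each completion queue while flushing, but it is entered with qhp->lock held (comment at line 905). The matched lines show the resulting juggling: take a reference so the QP cannot be freed, drop qhp->lock, then retake it nested (plain spin_lock, since interrupts stay disabled across the gap) for each flush pass, and finally restore the caller's lock state. The CQ locking itself does not appear here because those lines do not mention qhp; its placement below is an assumption inferred from the nested spin_lock:

    atomic_inc(&qhp->refcnt);                   /* keep qhp alive */
    spin_unlock_irqrestore(&qhp->lock, *flag);
    /* presumably: lock rchp's CQ lock here, then ... */
    spin_lock(&qhp->lock);                      /* nested under CQ lock */
    c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
    flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
    spin_unlock(&qhp->lock);
    /* ... same dance for schp and the SQ (lines 931-935) ... */
    if (atomic_dec_and_test(&qhp->refcnt))
        wake_up(&qhp->wait);                    /* last ref: wake waiter */
    spin_lock_irqsave(&qhp->lock, *flag);       /* restore caller state */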
947 static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
951 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
952 schp = get_chp(qhp->rhp, qhp->attr.scq);
954 if (qhp->ibqp.uobject) {
955 t4_set_wq_in_error(&qhp->wq);
961 __flush_qp(qhp, rchp, schp, flag);
964 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
972 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1007 pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
1039 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1046 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1047 qhp->ep->hwtid);
1052 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1060 FW_WR_FLOWID(qhp->ep->hwtid) |
1067 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1068 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1070 if (qhp->attr.mpa_attr.recv_marker_enabled)
1072 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1074 if (qhp->attr.mpa_attr.crc_enabled)
1080 if (!qhp->ibqp.uobject)
1083 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1084 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1085 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1086 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1087 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1088 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1089 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1090 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1091 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1092 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1093 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1094 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1095 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1097 if (qhp->attr.mpa_attr.initiator)
1098 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
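
Every field written into the RI init work request at lines 1083-1095 passes through cpu_to_be16()/cpu_to_be32(): the T4 firmware consumes these work requests in big-endian byte order regardless of host endianness. The QP's queue IDs, CQ IDs, ORD/IRD limits, and TCP sequence numbers are all handed to the firmware in this one request.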
1118 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1124 struct c4iw_qp_attributes newattr = qhp->attr;
1132 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1133 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1136 spin_lock_irqsave(&qhp->lock, flag);
1140 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1164 qhp->attr = newattr;
1169 if (qhp->attr.state == attrs->next_state)
1172 switch (qhp->attr.state) {
1184 qhp->attr.mpa_attr = attrs->mpa_attr;
1185 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1186 qhp->ep = qhp->attr.llp_stream_handle;
1187 qhp->attr.state = C4IW_QP_STATE_RTS;
1195 c4iw_get_ep(&qhp->ep->com);
1196 spin_unlock_irqrestore(&qhp->lock, flag);
1197 ret = rdma_init(rhp, qhp);
1198 spin_lock_irqsave(&qhp->lock, flag);
1203 qhp->attr.state = C4IW_QP_STATE_ERROR;
1204 flush_qp(qhp, &flag);
1214 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1215 qhp->attr.state = C4IW_QP_STATE_CLOSING;
1216 ep = qhp->ep;
1222 spin_unlock_irqrestore(&qhp->lock, flag);
1223 ret = rdma_fini(rhp, qhp, ep);
1224 spin_lock_irqsave(&qhp->lock, flag);
1232 qhp->attr.state = C4IW_QP_STATE_TERMINATE;
1233 if (qhp->ibqp.uobject)
1234 t4_set_wq_in_error(&qhp->wq);
1235 ep = qhp->ep;
1241 qhp->attr.state = C4IW_QP_STATE_ERROR;
1245 ep = qhp->ep;
1262 flush_qp(qhp, &flag);
1263 qhp->attr.state = C4IW_QP_STATE_IDLE;
1264 qhp->attr.llp_stream_handle = NULL;
1265 c4iw_put_ep(&qhp->ep->com);
1266 qhp->ep = NULL;
1267 wake_up(&qhp->wait);
1281 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1285 qhp->attr.state = C4IW_QP_STATE_IDLE;
1296 __func__, qhp->attr.state);
1303 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1304 qhp->wq.sq.qid);
1307 qhp->attr.llp_stream_handle = NULL;
1308 ep = qhp->ep;
1309 qhp->ep = NULL;
1310 qhp->attr.state = C4IW_QP_STATE_ERROR;
1312 wake_up(&qhp->wait);
1314 flush_qp(qhp, &flag);
1316 spin_unlock_irqrestore(&qhp->lock, flag);
1319 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1339 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
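
Taken together, the c4iw_modify_qp() hits at lines 1184-1314 trace the QP state machine. A partial map of the transitions visible in the matched lines (the full switch handles more cases):

    IDLE      --> RTS        via rdma_init()             (lines 1187, 1197)
    RTS       --> CLOSING    then rdma_fini(), flush     (lines 1215, 1223)
    RTS       --> TERMINATE  then post_terminate()       (lines 1232, 1319)
    CLOSING   --> IDLE       after a clean flush         (lines 1263, 1285)
    any state --> ERROR      on failure, with flush_qp() (lines 1203, 1241, 1310)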
1346 struct c4iw_qp *qhp;
1350 qhp = to_c4iw_qp(ib_qp);
1351 rhp = qhp->rhp;
1354 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1355 wait_event(qhp->wait, !qhp->ep);
1357 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1358 atomic_dec(&qhp->refcnt);
1359 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1363 destroy_qp(&rhp->rdev, &qhp->wq,
1366 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1367 kfree(qhp);
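
Lines 1354-1367 show the teardown ordering that the refcount initialized at line 1454 exists for: force the QP into ERROR, wait for the endpoint to be disassociated, unpublish the qpid, then drop the creation reference and block until transient holders such as __flush_qp() are gone before freeing:

    c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
    wait_event(qhp->wait, !qhp->ep);            /* ep detached */
    remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
    atomic_dec(&qhp->refcnt);                   /* drop creation ref */
    wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
    destroy_qp(&rhp->rdev, &qhp->wq, /* ... */);
    kfree(qhp);                                 /* now safe to free */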
1375 struct c4iw_qp *qhp;
1411 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1412 if (!qhp)
1414 qhp->wq.sq.size = sqsize;
1415 qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
1416 qhp->wq.rq.size = rqsize;
1417 qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
1420 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1421 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1425 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1427 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1436 qhp->rhp = rhp;
1437 qhp->attr.pd = php->pdid;
1438 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1439 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1440 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1441 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1442 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1443 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1444 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1445 qhp->attr.state = C4IW_QP_STATE_IDLE;
1446 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1447 qhp->attr.enable_rdma_read = 1;
1448 qhp->attr.enable_rdma_write = 1;
1449 qhp->attr.enable_bind = 1;
1450 qhp->attr.max_ord = 1;
1451 qhp->attr.max_ird = 1;
1452 spin_lock_init(&qhp->lock);
1453 init_waitqueue_head(&qhp->wait);
1454 atomic_set(&qhp->refcnt, 1);
1456 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1483 uresp.sqid = qhp->wq.sq.qid;
1484 uresp.sq_size = qhp->wq.sq.size;
1485 uresp.sq_memsize = qhp->wq.sq.memsize;
1486 uresp.rqid = qhp->wq.rq.qid;
1487 uresp.rq_size = qhp->wq.rq.size;
1488 uresp.rq_memsize = qhp->wq.rq.memsize;
1503 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
1504 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1507 mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1508 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1511 mm3->addr = qhp->wq.sq.udb;
1515 mm4->addr = qhp->wq.rq.udb;
1519 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1520 init_timer(&(qhp->timer));
1521 PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
1522 __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
1523 qhp->wq.sq.qid);
1524 return &qhp->ibqp;
1534 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1536 destroy_qp(&rhp->rdev, &qhp->wq,
1539 kfree(qhp);
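
Two details of c4iw_create_qp() are worth noting from the lines above. First, the ring sizing at lines 1414-1421 reserves one entry beyond the requested depth and rounds the byte size up to a whole page; with a hypothetical 64-byte entry, a requested sqsize of 127 would give (127 + 1) * 64 = 8192 bytes, exactly two 4 KiB pages (the real entry sizes are sizeof *qhp->wq.sq.queue and sizeof *qhp->wq.rq.queue, which these lines do not spell out). Second, for user QPs the four mm entries at lines 1503-1515 describe what the user-mode library will mmap(): mm1/mm2 cover the page-aligned SQ and RQ queue memory, and mm3/mm4 the per-queue user doorbell addresses (udb), with the IDs and sizes echoed back through uresp at lines 1483-1488.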
1547 struct c4iw_qp *qhp;
1562 qhp = to_c4iw_qp(ibqp);
1563 rhp = qhp->rhp;
1579 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);