Lines Matching refs:qhp

97 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
100 spin_lock_irqsave(&qhp->lock, flag);
101 qhp->attr.state = state;
102 spin_unlock_irqrestore(&qhp->lock, flag);
547 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
552 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
553 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
574 struct c4iw_qp *qhp;
577 qhp = container_of(work, struct c4iw_qp, free_work);
578 ucontext = qhp->ucontext;
579 rhp = qhp->rhp;
581 CTR3(KTR_IW_CXGBE, "%s qhp %p ucontext %p", __func__,
582 qhp, ucontext);
583 destroy_qp(&rhp->rdev, &qhp->wq,
588 kfree(qhp);
593 struct c4iw_qp *qhp;
595 qhp = container_of(kref, struct c4iw_qp, kref);
596 CTR2(KTR_IW_CXGBE, "%s qhp %p", __func__, qhp);
597 queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
612 static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
619 schp = to_c4iw_cq(qhp->ibqp.send_cq);
622 PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
628 V_CQE_QPID(qhp->wq.sq.qid));
642 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
649 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
652 PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
658 V_CQE_QPID(qhp->wq.sq.qid));
782 struct c4iw_qp *qhp;
790 qhp = to_c4iw_qp(ibqp);
791 rdev = &qhp->rhp->rdev;
792 spin_lock_irqsave(&qhp->lock, flag);
793 if (t4_wq_in_error(&qhp->wq)) {
794 spin_unlock_irqrestore(&qhp->lock, flag);
795 complete_sq_drain_wr(qhp, wr);
798 num_wrs = t4_sq_avail(&qhp->wq);
800 spin_unlock_irqrestore(&qhp->lock, flag);
810 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
811 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
816 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
818 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
829 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
834 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
841 c4iw_invalidate_mr(qhp->rhp,
851 if (!qhp->wq.sq.oldest_read)
852 qhp->wq.sq.oldest_read = swsqe;
865 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
880 c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
891 swsqe->idx = qhp->wq.sq.pidx;
894 qhp->sq_sig_all;
898 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
902 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
906 t4_sq_produce(&qhp->wq, len16);
910 t4_ring_sq_db(&qhp->wq, idx, wqe, rdev->adap->iwt.wc_en);
911 spin_unlock_irqrestore(&qhp->lock, flag);
919 struct c4iw_qp *qhp;
926 qhp = to_c4iw_qp(ibqp);
927 spin_lock_irqsave(&qhp->lock, flag);
928 if (t4_wq_in_error(&qhp->wq)) {
929 spin_unlock_irqrestore(&qhp->lock, flag);
930 complete_rq_drain_wr(qhp, wr);
933 num_wrs = t4_rq_avail(&qhp->wq);
935 spin_unlock_irqrestore(&qhp->lock, flag);
945 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
946 qhp->wq.rq.wq_pidx *
949 err = build_rdma_recv(qhp, wqe, wr, &len16);
957 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
961 wqe->recv.wrid = qhp->wq.rq.pidx;
967 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
968 t4_rq_produce(&qhp->wq, len16);
974 t4_ring_rq_db(&qhp->wq, idx, wqe, qhp->rhp->rdev.adap->iwt.wc_en);
975 spin_unlock_irqrestore(&qhp->lock, flag);
1115 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1122 struct socket *so = qhp->ep->com.so;
1127 CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
1128 qhp->wq.sq.qid, qhp->ep->hwtid);
1138 V_FW_WR_FLOWID(qhp->ep->hwtid) |
1144 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1145 term->layer_etype = qhp->attr.layer_etype;
1146 term->ecode = qhp->attr.ecode;
1154 t4_wrq_tx(qhp->rhp->rdev.adap, wr);
1157 /* Assumes qhp lock is held. */
1158 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1165 CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
1170 spin_lock(&qhp->lock);
1172 if (qhp->wq.flushed) {
1173 spin_unlock(&qhp->lock);
1177 qhp->wq.flushed = 1;
1180 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1181 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1182 spin_unlock(&qhp->lock);
1187 spin_lock(&qhp->lock);
1190 sq_flushed = c4iw_flush_sq(qhp);
1191 spin_unlock(&qhp->lock);
1218 static void flush_qp(struct c4iw_qp *qhp)
1223 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1224 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1226 t4_set_wq_in_error(&qhp->wq);
1227 if (qhp->ibqp.uobject) {
1241 __flush_qp(qhp, rchp, schp);
1245 rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
1257 KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));
1259 CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
1260 qhp->wq.sq.qid, ep, ep->hwtid);
1285 qhp->wq.sq.qid, ep->com.so, __func__);
1340 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1345 struct c4iw_ep *ep = qhp->ep;
1346 struct c4iw_rdev *rdev = &qhp->rhp->rdev;
1353 CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
1354 qhp->wq.sq.qid, ep, ep->hwtid);
1360 ret = alloc_ird(rhp, qhp->attr.max_ird);
1362 qhp->attr.max_ird = 0;
1379 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1380 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1382 if (qhp->attr.mpa_attr.recv_marker_enabled)
1384 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1386 if (qhp->attr.mpa_attr.crc_enabled)
1392 if (!qhp->ibqp.uobject)
1395 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1396 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1397 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1398 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1399 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1400 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1401 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1402 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1403 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1406 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1407 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1409 if (qhp->attr.mpa_attr.initiator)
1410 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1417 free_ird(rhp, qhp->attr.max_ird);
1423 qhp->wq.sq.qid, ep->com.so, __func__);
1426 free_ird(rhp, qhp->attr.max_ird);
1431 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1437 struct c4iw_qp_attributes newattr = qhp->attr;
1444 CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
1445 qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
1446 CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
1449 mutex_lock(&qhp->mutex);
1453 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1477 qhp->attr = newattr;
1482 if (qhp->attr.state == attrs->next_state)
1495 if ((qhp->attr.state >= C4IW_QP_STATE_ERROR) &&
1501 switch (qhp->attr.state) {
1513 qhp->attr.mpa_attr = attrs->mpa_attr;
1514 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1515 qhp->ep = qhp->attr.llp_stream_handle;
1516 set_state(qhp, C4IW_QP_STATE_RTS);
1524 c4iw_get_ep(&qhp->ep->com);
1525 ret = rdma_init(rhp, qhp);
1530 set_state(qhp, C4IW_QP_STATE_ERROR);
1531 flush_qp(qhp);
1541 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1542 t4_set_wq_in_error(&qhp->wq);
1543 set_state(qhp, C4IW_QP_STATE_CLOSING);
1544 ep = qhp->ep;
1548 c4iw_get_ep(&qhp->ep->com);
1550 ret = rdma_fini(rhp, qhp, ep);
1555 t4_set_wq_in_error(&qhp->wq);
1556 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1557 qhp->attr.layer_etype = attrs->layer_etype;
1558 qhp->attr.ecode = attrs->ecode;
1559 ep = qhp->ep;
1561 c4iw_get_ep(&qhp->ep->com);
1565 terminate = qhp->attr.send_term;
1566 ret = rdma_fini(rhp, qhp, ep);
1572 t4_set_wq_in_error(&qhp->wq);
1573 set_state(qhp, C4IW_QP_STATE_ERROR);
1577 ep = qhp->ep;
1578 c4iw_get_ep(&qhp->ep->com);
1592 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1599 flush_qp(qhp);
1600 set_state(qhp, C4IW_QP_STATE_IDLE);
1601 qhp->attr.llp_stream_handle = NULL;
1602 c4iw_put_ep(&qhp->ep->com);
1603 qhp->ep = NULL;
1604 wake_up(&qhp->wait);
1618 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1622 set_state(qhp, C4IW_QP_STATE_IDLE);
1633 __func__, qhp->attr.state);
1641 qhp->ep, qhp->wq.sq.qid);
1644 qhp->attr.llp_stream_handle = NULL;
1646 ep = qhp->ep;
1647 qhp->ep = NULL;
1648 set_state(qhp, C4IW_QP_STATE_ERROR);
1652 flush_qp(qhp);
1653 wake_up(&qhp->wait);
1655 mutex_unlock(&qhp->mutex);
1658 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1677 CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
1684 struct c4iw_qp *qhp;
1688 qhp = to_c4iw_qp(ib_qp);
1689 rhp = qhp->rhp;
1692 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1693 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1695 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1696 wait_event(qhp->wait, !qhp->ep);
1698 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1700 free_ird(rhp, qhp->attr.max_ird);
1704 qhp->wq.sq.qid);
1713 struct c4iw_qp *qhp;
1753 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1754 if (!qhp)
1756 qhp->wq.sq.size = sqsize;
1757 qhp->wq.sq.memsize =
1759 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
1760 qhp->wq.sq.flush_cidx = -1;
1761 qhp->wq.rq.size = rqsize;
1762 qhp->wq.rq.memsize =
1764 sizeof(*qhp->wq.rq.queue);
1767 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1768 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1772 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1774 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1783 qhp->rhp = rhp;
1784 qhp->attr.pd = php->pdid;
1785 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1786 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1787 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1788 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1789 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1790 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1791 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1792 qhp->attr.state = C4IW_QP_STATE_IDLE;
1793 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1794 qhp->attr.enable_rdma_read = 1;
1795 qhp->attr.enable_rdma_write = 1;
1796 qhp->attr.enable_bind = 1;
1797 qhp->attr.max_ord = 0;
1798 qhp->attr.max_ird = 0;
1799 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1800 spin_lock_init(&qhp->lock);
1801 mutex_init(&qhp->mutex);
1802 init_waitqueue_head(&qhp->wait);
1803 kref_init(&qhp->kref);
1804 INIT_WORK(&qhp->free_work, free_qp_work);
1806 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1833 uresp.sqid = qhp->wq.sq.qid;
1834 uresp.sq_size = qhp->wq.sq.size;
1835 uresp.sq_memsize = qhp->wq.sq.memsize;
1836 uresp.rqid = qhp->wq.rq.qid;
1837 uresp.rq_size = qhp->wq.rq.size;
1838 uresp.rq_memsize = qhp->wq.rq.memsize;
1854 sq_key_mm->addr = qhp->wq.sq.phys_addr;
1855 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1861 rq_key_mm->addr = qhp->wq.rq.phys_addr;
1862 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1868 sq_db_key_mm->addr = (u64)qhp->wq.sq.bar2_pa;
1875 rq_db_key_mm->addr = (u64)qhp->wq.rq.bar2_pa;
1883 qhp->ucontext = ucontext;
1885 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1886 init_timer(&(qhp->timer));
1889 __func__, qhp->wq.sq.qid,
1890 qhp->wq.sq.size, qhp->wq.sq.memsize, attrs->cap.max_send_wr);
1892 __func__, qhp->wq.rq.qid,
1893 qhp->wq.rq.size, qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
1894 return &qhp->ibqp;
1904 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1906 destroy_qp(&rhp->rdev, &qhp->wq,
1909 kfree(qhp);
1917 struct c4iw_qp *qhp;
1932 qhp = to_c4iw_qp(ibqp);
1933 rhp = qhp->rhp;
1949 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1961 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
1965 attr->qp_state = to_ib_qp_state(qhp->attr.state);
1966 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
1967 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
1968 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
1969 init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
1971 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;