Lines Matching defs:qhp in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/cxgb3/

247 static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
254 err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
280 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
281 qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
282 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
283 qhp->wq.rq_size_log2)].pbl_addr = 0;
287 static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
301 pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
308 pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
343 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
344 qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
345 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
346 qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
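
Both recv-build helpers above index the RQ shadow ring through Q_PTR2IDX, masking a free-running write pointer down to a slot in a power-of-two queue, and their callers size the loop with Q_FREECNT. The real macros live in cxio_wr.h; the stand-ins below are a minimal sketch reconstructed from how the call sites use them, with illustrative values:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for the cxio_wr.h ring macros, inferred from
 * the call sites above: pointers run freely and are masked to the
 * power-of-two ring on each access, so wrap-around needs no branches. */
#define Q_PTR2IDX(ptr, size_log2)  ((ptr) & ((1UL << (size_log2)) - 1))
#define Q_FREECNT(rptr, wptr, size_log2) \
        ((1UL << (size_log2)) - ((wptr) - (rptr)))

int main(void)
{
        uint32_t rptr = 1025, wptr = 1029;  /* 4 WRs outstanding */
        unsigned size_log2 = 6;             /* 64-entry ring */

        assert(Q_PTR2IDX(wptr, size_log2) == 5);         /* 1029 mod 64 */
        assert(Q_FREECNT(rptr, wptr, size_log2) == 60);  /* 64 - 4 free */
        return 0;
}
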
357 struct iwch_qp *qhp;
365 qhp = to_iwch_qp(ibqp);
366 spin_lock_irqsave(&qhp->lock, flag);
367 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
368 spin_unlock_irqrestore(&qhp->lock, flag);
372 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
373 qhp->wq.sq_size_log2);
375 spin_unlock_irqrestore(&qhp->lock, flag);
384 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
385 wqe = (union t3_wr *) (qhp->wq.queue + idx);
391 sqp = qhp->wq.sq +
392 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
414 if (!qhp->wq.oldest_read)
415 qhp->wq.oldest_read = sqp;
420 &wr_cnt, &qhp->wq);
435 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
438 sqp->sq_wptr = qhp->wq.sq_wptr;
443 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
448 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
452 qhp->wq.wptr += wr_cnt;
453 ++(qhp->wq.sq_wptr);
455 spin_unlock_irqrestore(&qhp->lock, flag);
456 if (cxio_wq_db_enabled(&qhp->wq))
457 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
469 struct iwch_qp *qhp;
475 qhp = to_iwch_qp(ibqp);
476 spin_lock_irqsave(&qhp->lock, flag);
477 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
478 spin_unlock_irqrestore(&qhp->lock, flag);
482 num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
483 qhp->wq.rq_size_log2) - 1;
485 spin_unlock_irqrestore(&qhp->lock, flag);
494 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
495 wqe = (union t3_wr *) (qhp->wq.queue + idx);
498 err = build_rdma_recv(qhp, wqe, wr);
500 err = build_zero_stag_recv(qhp, wqe, wr);
508 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
512 idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
513 ++(qhp->wq.rq_wptr);
514 ++(qhp->wq.wptr);
518 spin_unlock_irqrestore(&qhp->lock, flag);
519 if (cxio_wq_db_enabled(&qhp->wq))
520 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
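
iwch_post_send and iwch_post_receive follow the same posting discipline: take qhp->lock with interrupts saved, refuse to post once the QP state is past RTS, check free space against the free-running pointers (the receive side posts against Q_FREECNT(...) - 1, holding one RQ slot in reserve), build the WQE in place, bump the pointers, and ring the doorbell only after dropping the lock. A minimal userspace model of that discipline, with a pthread mutex standing in for spin_lock_irqsave and all names illustrative rather than the driver's:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>

#define RING_LOG2 6                          /* 64-slot work queue */
#define PTR2IDX(p)     ((p) & ((1u << RING_LOG2) - 1))
#define FREECNT(r, w)  ((1u << RING_LOG2) - ((w) - (r)))

enum { STATE_RTS = 3 };                      /* illustrative ordering */

struct model_qp {
        pthread_mutex_t lock;                /* models qhp->lock */
        int state;                           /* models qhp->attr.state */
        uint32_t rptr, wptr;                 /* free-running pointers */
        uint64_t queue[1u << RING_LOG2];     /* models qhp->wq.queue */
};

static void ring_db(struct model_qp *qp) { (void)qp; /* MMIO write in HW */ }

int model_post(struct model_qp *qp, uint64_t wqe)
{
        pthread_mutex_lock(&qp->lock);
        if (qp->state > STATE_RTS) {         /* no posting while closing */
                pthread_mutex_unlock(&qp->lock);
                return -EINVAL;
        }
        if (FREECNT(qp->rptr, qp->wptr) == 0) {
                pthread_mutex_unlock(&qp->lock);
                return -ENOMEM;              /* ring full */
        }
        qp->queue[PTR2IDX(qp->wptr)] = wqe;  /* build the WQE in place */
        qp->wptr++;                          /* publish under the lock */
        pthread_mutex_unlock(&qp->lock);

        ring_db(qp);                         /* notify HW outside the lock */
        return 0;
}
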
534 struct iwch_qp *qhp;
546 qhp = to_iwch_qp(qp);
548 rhp = qhp->rhp;
550 spin_lock_irqsave(&qhp->lock, flag);
551 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
552 spin_unlock_irqrestore(&qhp->lock, flag);
555 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
556 qhp->wq.sq_size_log2);
558 spin_unlock_irqrestore(&qhp->lock, flag);
561 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
564 wqe = (union t3_wr *) (qhp->wq.queue + idx);
584 spin_unlock_irqrestore(&qhp->lock, flag);
587 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
588 sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
591 sqp->sq_wptr = qhp->wq.sq_wptr;
597 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
599 ++(qhp->wq.wptr);
600 ++(qhp->wq.sq_wptr);
601 spin_unlock_irqrestore(&qhp->lock, flag);
603 if (cxio_wq_db_enabled(&qhp->wq))
604 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
741 int iwch_post_zb_read(struct iwch_qp *qhp)
764 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
767 return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
773 int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
797 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
799 return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
803 * Assumes qhp lock is held.
805 static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
811 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
812 schp = get_chp(qhp->rhp, qhp->attr.scq);
814 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
815 /* take a ref on the qhp since we must release the lock */
816 atomic_inc(&qhp->refcnt);
817 spin_unlock_irqrestore(&qhp->lock, *flag);
821 spin_lock(&qhp->lock);
823 cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
824 flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
825 spin_unlock(&qhp->lock);
832 spin_lock(&qhp->lock);
834 cxio_count_scqes(&schp->cq, &qhp->wq, &count);
835 flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
836 spin_unlock(&qhp->lock);
842 if (atomic_dec_and_test(&qhp->refcnt))
843 wake_up(&qhp->wait);
845 spin_lock_irqsave(&qhp->lock, *flag);
848 static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
850 if (qhp->ibqp.uobject)
851 cxio_set_wq_in_error(&qhp->wq);
853 __flush_qp(qhp, flag);
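
__flush_qp cannot keep qhp->lock held while it flushes, because cxio_flush_rq/cxio_flush_sq need the CQ locks; the hits show the idiom it uses instead: pin the QP with a reference (the comment at file line 815), drop the lock, flush both queues, then dec-and-test the refcount and wake any waiter before retaking the lock for the caller. A small model of that keep-alive-across-unlock pattern, with a condition variable standing in for the kernel waitqueue and all names illustrative:

#include <pthread.h>
#include <stdatomic.h>

struct model_obj {
        pthread_mutex_t lock;        /* models qhp->lock */
        atomic_int refcnt;           /* models qhp->refcnt */
        pthread_cond_t wait;         /* models qhp->wait */
};

static void flush_against_cqs(struct model_obj *o) { (void)o; }

/* Caller holds o->lock on entry and gets it back on exit, mirroring
 * __flush_qp(): the destroy path waits for refcnt to drop before
 * freeing, so the pin makes the unlock window safe. */
void model_flush(struct model_obj *o)
{
        atomic_fetch_add(&o->refcnt, 1);     /* keep the object alive */
        pthread_mutex_unlock(&o->lock);

        flush_against_cqs(o);                /* takes the CQ locks */

        /* atomic_fetch_sub returns the old value, so == 1 means we
         * dropped the last reference, like atomic_dec_and_test(). */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                pthread_cond_broadcast(&o->wait);

        pthread_mutex_lock(&o->lock);
}
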
860 u16 iwch_rqes_posted(struct iwch_qp *qhp)
862 union t3_wr *wqe = qhp->wq.queue;
868 PDBG("%s qhp %p count %u\n", __func__, qhp, count);
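
iwch_rqes_posted walks the WQ from slot 0 counting the receives that were pre-posted before the connection came up; rdma_init feeds the result to the firmware as init_attr.rqe_count. A sketch of that walk, assuming (as the fragment at file lines 862-868 suggests) that each WQE exposes an opcode and that a fresh QP has only receive WQEs at the head of the queue:

#include <stdint.h>

enum { WR_RCV = 1 };                 /* illustrative opcode value */

struct model_wqe { uint8_t opcode; };

/* Count contiguous receive WQEs from the head of an otherwise unused
 * queue; meaningful only before the QP enters RTS, while
 * iwch_post_receive() is the only thing that has written WQEs. */
uint16_t model_rqes_posted(const struct model_wqe *queue, unsigned nslots)
{
        uint16_t count = 0;
        while (count < nslots && queue[count].opcode == WR_RCV)
                count++;
        return count;
}
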
872 static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
879 init_attr.tid = qhp->ep->hwtid;
880 init_attr.qpid = qhp->wq.qpid;
881 init_attr.pdid = qhp->attr.pd;
882 init_attr.scqid = qhp->attr.scq;
883 init_attr.rcqid = qhp->attr.rcq;
884 init_attr.rq_addr = qhp->wq.rq_addr;
885 init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
887 qhp->attr.mpa_attr.recv_marker_enabled |
888 (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
889 (qhp->attr.mpa_attr.crc_enabled << 2);
894 if (!qhp->ibqp.uobject)
898 init_attr.tcp_emss = qhp->ep->emss;
899 init_attr.ord = qhp->attr.max_ord;
900 init_attr.ird = qhp->attr.max_ird;
901 init_attr.qp_dma_addr = qhp->wq.dma_addr;
902 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
903 init_attr.rqe_count = iwch_rqes_posted(qhp);
904 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
905 init_attr.chan = qhp->ep->l2t->smt_idx;
908 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
910 if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
914 init_attr.irs = qhp->ep->rcv_seq;
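
rdma_init packs the negotiated MPA options into a three-bit field (file lines 887-889): bit 0 for receive markers, bit 1 for transmit markers, bit 2 for CRC. A short worked example of that packing:

#include <assert.h>
#include <stdint.h>

/* Same layout as the rdma_init() fragment above: recv markers in
 * bit 0, xmit markers in bit 1, CRC in bit 2. */
static uint8_t pack_mpa(int recv_marker, int xmit_marker, int crc)
{
        return (uint8_t)((recv_marker & 1) |
                         ((xmit_marker & 1) << 1) |
                         ((crc & 1) << 2));
}

int main(void)
{
        assert(pack_mpa(0, 0, 1) == 0x4);   /* CRC only */
        assert(pack_mpa(1, 1, 1) == 0x7);   /* all options on */
        return 0;
}
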
924 int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
930 struct iwch_qp_attributes newattr = qhp->attr;
938 PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
939 qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
942 spin_lock_irqsave(&qhp->lock, flag);
946 if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
972 qhp->attr = newattr;
977 if (qhp->attr.state == attrs->next_state)
980 switch (qhp->attr.state) {
992 qhp->attr.mpa_attr = attrs->mpa_attr;
993 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
994 qhp->ep = qhp->attr.llp_stream_handle;
995 qhp->attr.state = IWCH_QP_STATE_RTS;
1003 get_ep(&qhp->ep->com);
1004 spin_unlock_irqrestore(&qhp->lock, flag);
1005 ret = rdma_init(rhp, qhp, mask, attrs);
1006 spin_lock_irqsave(&qhp->lock, flag);
1011 qhp->attr.state = IWCH_QP_STATE_ERROR;
1012 flush_qp(qhp, &flag);
1022 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1023 qhp->attr.state = IWCH_QP_STATE_CLOSING;
1027 ep = qhp->ep;
1032 qhp->attr.state = IWCH_QP_STATE_TERMINATE;
1033 if (qhp->ibqp.uobject)
1034 cxio_set_wq_in_error(&qhp->wq);
1039 qhp->attr.state = IWCH_QP_STATE_ERROR;
1043 ep = qhp->ep;
1060 flush_qp(qhp, &flag);
1061 qhp->attr.state = IWCH_QP_STATE_IDLE;
1062 qhp->attr.llp_stream_handle = NULL;
1063 put_ep(&qhp->ep->com);
1064 qhp->ep = NULL;
1065 wake_up(&qhp->wait);
1080 if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
1081 !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
1085 qhp->attr.state = IWCH_QP_STATE_IDLE;
1096 __func__, qhp->attr.state);
1103 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1104 qhp->wq.qpid);
1107 qhp->attr.llp_stream_handle = NULL;
1108 ep = qhp->ep;
1109 qhp->ep = NULL;
1110 qhp->attr.state = IWCH_QP_STATE_ERROR;
1112 wake_up(&qhp->wait);
1114 flush_qp(qhp, &flag);
1116 spin_unlock_irqrestore(&qhp->lock, flag);
1119 iwch_post_terminate(qhp, NULL);
1138 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
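
iwch_modify_qp drives the QP state machine under qhp->lock: IDLE reaches RTS through rdma_init; from RTS the QP can move to CLOSING, TERMINATE, or ERROR (with flushes and endpoint teardown on the error paths); and ERROR returns to IDLE only once both work queues are empty (file lines 1080-1085). A stripped-down transition table matching what these excerpts show; the driver remains the authoritative source for the full rules:

#include <stdbool.h>

enum qp_state { QP_IDLE, QP_RTS, QP_CLOSING, QP_TERMINATE, QP_ERROR };

/* Legal transitions as suggested by the excerpts above; a sketch only. */
static bool transition_ok(enum qp_state cur, enum qp_state next)
{
        switch (cur) {
        case QP_IDLE:      return next == QP_RTS || next == QP_ERROR;
        case QP_RTS:       return next == QP_CLOSING ||
                                  next == QP_TERMINATE || next == QP_ERROR;
        case QP_CLOSING:   return next == QP_IDLE || next == QP_ERROR;
        case QP_TERMINATE: return next == QP_ERROR;
        case QP_ERROR:     return next == QP_IDLE;  /* only once drained */
        }
        return false;
}
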
1142 static int quiesce_qp(struct iwch_qp *qhp)
1144 spin_lock_irq(&qhp->lock);
1145 iwch_quiesce_tid(qhp->ep);
1146 qhp->flags |= QP_QUIESCED;
1147 spin_unlock_irq(&qhp->lock);
1151 static int resume_qp(struct iwch_qp *qhp)
1153 spin_lock_irq(&qhp->lock);
1154 iwch_resume_tid(qhp->ep);
1155 qhp->flags &= ~QP_QUIESCED;
1156 spin_unlock_irq(&qhp->lock);
1163 struct iwch_qp *qhp;
1166 qhp = get_qhp(chp->rhp, i);
1167 if (!qhp)
1169 if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
1170 quiesce_qp(qhp);
1173 if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
1174 quiesce_qp(qhp);
1182 struct iwch_qp *qhp;
1185 qhp = get_qhp(chp->rhp, i);
1186 if (!qhp)
1188 if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
1189 resume_qp(qhp);
1192 if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
1193 resume_qp(qhp);