Search scope: /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/hw/cxgb3/

Lines Matching defs:qhp

241 	struct iwch_qp *qhp;
248 qhp = to_iwch_qp(ibqp);
249 spin_lock_irqsave(&qhp->lock, flag);
250 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
251 spin_unlock_irqrestore(&qhp->lock, flag);
254 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
255 qhp->wq.sq_size_log2);
257 spin_unlock_irqrestore(&qhp->lock, flag);
266 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
267 wqe = (union t3_wr *) (qhp->wq.queue + idx);
275 sqp = qhp->wq.sq +
276 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
295 if (!qhp->wq.oldest_read)
296 qhp->wq.oldest_read = sqp;
307 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
310 sqp->sq_wptr = qhp->wq.sq_wptr;
315 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
319 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
323 ++(qhp->wq.wptr);
324 ++(qhp->wq.sq_wptr);
326 spin_unlock_irqrestore(&qhp->lock, flag);
327 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
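
The matches above (lines 241-327) are from the send-path posting routine. The recurring pattern: take qhp->lock, refuse the post if the QP has moved past RTS, compute the free SQ slots with Q_FREECNT, build each work request at the wptr index, stamp the shadow SQ entry with the posting point, advance both wptr and sq_wptr, then drop the lock and ring the doorbell. A condensed sketch of that flow using the helpers visible in the matches; the loop shape, the -EINVAL return, and the WR-building step are filled in for readability and are not part of the matched lines:

    spin_lock_irqsave(&qhp->lock, flag);
    if (qhp->attr.state > IWCH_QP_STATE_RTS) {
            spin_unlock_irqrestore(&qhp->lock, flag);
            return -EINVAL;                          /* QP no longer postable */
    }
    num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
                        qhp->wq.sq_size_log2);
    while (wr && num_wrs--) {
            idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
            wqe = (union t3_wr *)(qhp->wq.queue + idx);
            /* ... build the send/write/read WR into *wqe, stamp the wrid and
             *     shadow SQ entry (lines 275-310) ... */
            ++(qhp->wq.wptr);                        /* slot consumed on the HW queue */
            ++(qhp->wq.sq_wptr);                     /* and on the shadow SQ */
            wr = wr->next;
    }
    spin_unlock_irqrestore(&qhp->lock, flag);
    ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);   /* tell the adapter */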
335 struct iwch_qp *qhp;
341 qhp = to_iwch_qp(ibqp);
342 spin_lock_irqsave(&qhp->lock, flag);
343 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
344 spin_unlock_irqrestore(&qhp->lock, flag);
347 num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
348 qhp->wq.rq_size_log2) - 1;
350 spin_unlock_irqrestore(&qhp->lock, flag);
354 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
355 wqe = (union t3_wr *) (qhp->wq.queue + idx);
357 err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
364 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
367 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
371 idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
372 ++(qhp->wq.rq_wptr);
373 ++(qhp->wq.wptr);
377 spin_unlock_irqrestore(&qhp->lock, flag);
378 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
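
The receive-path matches (lines 335-378) mirror the send path, but count free slots on the RQ pointers (reserving one slot) and record the caller's cookie in the software RQ shadow array so the CQ poller can hand it back. A minimal sketch of the RQ bookkeeping; the stored value (the caller's wr_id) is inferred, since the right-hand side on line 365 is not in the matches:

    num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
                        qhp->wq.rq_size_log2) - 1;   /* keep one slot back */
    idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
    wqe = (union t3_wr *)(qhp->wq.queue + idx);
    err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);   /* fill in the recv WR */
    if (!err) {
            /* remember the cookie for completion time (assumed to be wr->wr_id) */
            qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
                    wr->wr_id;
            ++(qhp->wq.rq_wptr);
            ++(qhp->wq.wptr);
    }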
388 struct iwch_qp *qhp;
400 qhp = to_iwch_qp(qp);
402 rhp = qhp->rhp;
404 spin_lock_irqsave(&qhp->lock, flag);
405 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
406 spin_unlock_irqrestore(&qhp->lock, flag);
409 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
410 qhp->wq.sq_size_log2);
412 spin_unlock_irqrestore(&qhp->lock, flag);
415 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
418 wqe = (union t3_wr *) (qhp->wq.queue + idx);
438 spin_unlock_irqrestore(&qhp->lock, flag);
441 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
442 sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
445 sqp->sq_wptr = qhp->wq.sq_wptr;
452 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
454 ++(qhp->wq.wptr);
455 ++(qhp->wq.sq_wptr);
456 spin_unlock_irqrestore(&qhp->lock, flag);
458 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
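
Lines 388-458 are a third posting path that touches only the SQ (no RQ bookkeeping). Beyond the common lock / free-count / doorbell pattern shown earlier, the matched lines make the shadow-SQ stamping explicit: the WR is tagged with its SQ slot so the CQ poller can match the completion back to it.

    wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;         /* stamp the WR with its SQ slot */
    sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
    sqp->sq_wptr = qhp->wq.sq_wptr;                  /* shadow entry mirrors the slot */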
598 int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
622 qhp->ep->hwtid, 5);
624 return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
628 * Assumes qhp lock is held.
630 static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
635 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
636 schp = get_chp(qhp->rhp, qhp->attr.scq);
638 PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
639 /* take a ref on the qhp since we must release the lock */
640 atomic_inc(&qhp->refcnt);
641 spin_unlock_irqrestore(&qhp->lock, *flag);
645 spin_lock(&qhp->lock);
647 cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
648 cxio_flush_rq(&qhp->wq, &rchp->cq, count);
649 spin_unlock(&qhp->lock);
654 spin_lock(&qhp->lock);
656 cxio_count_scqes(&schp->cq, &qhp->wq, &count);
657 cxio_flush_sq(&qhp->wq, &schp->cq, count);
658 spin_unlock(&qhp->lock);
662 if (atomic_dec_and_test(&qhp->refcnt))
663 wake_up(&qhp->wait);
665 spin_lock_irqsave(&qhp->lock, *flag);
668 static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
670 if (t3b_device(qhp->rhp))
671 cxio_set_wq_in_error(&qhp->wq);
673 __flush_qp(qhp, flag);
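
Lines 628-673 cover the flush path. __flush_qp runs with qhp->lock held: it pins the QP with a reference so the lock can be dropped, re-takes qhp->lock under each CQ in turn to count and flush the in-flight RQ and SQ entries, drops the reference, and re-acquires the lock for the caller. flush_qp additionally marks the WQ in error on T3B parts before flushing. A condensed sketch of the locking dance (the CQ locks taken around each step are in lines not shown here and are assumed):

    /* caller holds qhp->lock; pin the QP so it cannot go away while unlocked */
    atomic_inc(&qhp->refcnt);
    spin_unlock_irqrestore(&qhp->lock, *flag);

    /* flush the receive side against rchp */
    spin_lock(&qhp->lock);
    cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
    cxio_flush_rq(&qhp->wq, &rchp->cq, count);
    spin_unlock(&qhp->lock);

    /* same for the send side against schp */
    spin_lock(&qhp->lock);
    cxio_count_scqes(&schp->cq, &qhp->wq, &count);
    cxio_flush_sq(&qhp->wq, &schp->cq, count);
    spin_unlock(&qhp->lock);

    if (atomic_dec_and_test(&qhp->refcnt))
            wake_up(&qhp->wait);                     /* last ref: wake a waiting destroy */

    spin_lock_irqsave(&qhp->lock, *flag);            /* hand the lock back to the caller */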
680 static int rqes_posted(struct iwch_qp *qhp)
682 return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
685 static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
692 init_attr.tid = qhp->ep->hwtid;
693 init_attr.qpid = qhp->wq.qpid;
694 init_attr.pdid = qhp->attr.pd;
695 init_attr.scqid = qhp->attr.scq;
696 init_attr.rcqid = qhp->attr.rcq;
697 init_attr.rq_addr = qhp->wq.rq_addr;
698 init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
700 qhp->attr.mpa_attr.recv_marker_enabled |
701 (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
702 (qhp->attr.mpa_attr.crc_enabled << 2);
705 init_attr.tcp_emss = qhp->ep->emss;
706 init_attr.ord = qhp->attr.max_ord;
707 init_attr.ird = qhp->attr.max_ird;
708 init_attr.qp_dma_addr = qhp->wq.dma_addr;
709 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
710 init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
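
rdma_init (lines 685-710) packs the QP's software state into the firmware init block: queue and PD/CQ ids, RQ address and size, the negotiated MPA marker/CRC flags, TCP emss, ORD/IRD limits, and the DMA address and size of the work queue. rqes_posted decides the RECVS_POSTED flag by checking whether the first WR in the queue is a receive. A trimmed sketch of the field population; the struct type of init_attr is not shown in the matches, and mpa_flags is a hypothetical local standing in for the field assigned on line 699:

    init_attr.tid     = qhp->ep->hwtid;
    init_attr.qpid    = qhp->wq.qpid;
    init_attr.pdid    = qhp->attr.pd;
    init_attr.scqid   = qhp->attr.scq;
    init_attr.rcqid   = qhp->attr.rcq;
    init_attr.rq_addr = qhp->wq.rq_addr;
    init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
    /* MPA negotiation results folded into one flags value (hypothetical local) */
    mpa_flags = qhp->attr.mpa_attr.recv_marker_enabled |
                (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
                (qhp->attr.mpa_attr.crc_enabled << 2);
    init_attr.tcp_emss    = qhp->ep->emss;
    init_attr.ord         = qhp->attr.max_ord;
    init_attr.ird         = qhp->attr.max_ird;
    init_attr.qp_dma_addr = qhp->wq.dma_addr;
    init_attr.qp_dma_size = 1UL << qhp->wq.size_log2;
    init_attr.flags       = rqes_posted(qhp) ? RECVS_POSTED : 0;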
720 int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
726 struct iwch_qp_attributes newattr = qhp->attr;
734 PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
735 qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
738 spin_lock_irqsave(&qhp->lock, flag);
742 if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
768 qhp->attr = newattr;
773 if (qhp->attr.state == attrs->next_state)
776 switch (qhp->attr.state) {
788 qhp->attr.mpa_attr = attrs->mpa_attr;
789 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
790 qhp->ep = qhp->attr.llp_stream_handle;
791 qhp->attr.state = IWCH_QP_STATE_RTS;
799 get_ep(&qhp->ep->com);
800 spin_unlock_irqrestore(&qhp->lock, flag);
801 ret = rdma_init(rhp, qhp, mask, attrs);
802 spin_lock_irqsave(&qhp->lock, flag);
807 qhp->attr.state = IWCH_QP_STATE_ERROR;
808 flush_qp(qhp, &flag);
818 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
819 qhp->attr.state = IWCH_QP_STATE_CLOSING;
823 ep = qhp->ep;
827 qhp->attr.state = IWCH_QP_STATE_TERMINATE;
828 if (t3b_device(qhp->rhp))
829 cxio_set_wq_in_error(&qhp->wq);
834 qhp->attr.state = IWCH_QP_STATE_ERROR;
838 ep = qhp->ep;
854 qhp->attr.state = IWCH_QP_STATE_IDLE;
855 qhp->attr.llp_stream_handle = NULL;
856 put_ep(&qhp->ep->com);
857 qhp->ep = NULL;
858 wake_up(&qhp->wait);
873 if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
874 !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
878 qhp->attr.state = IWCH_QP_STATE_IDLE;
879 memset(&qhp->attr, 0, sizeof(qhp->attr));
890 __FUNCTION__, qhp->attr.state);
897 PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
898 qhp->wq.qpid);
901 qhp->attr.llp_stream_handle = NULL;
902 ep = qhp->ep;
903 qhp->ep = NULL;
904 qhp->attr.state = IWCH_QP_STATE_ERROR;
906 wake_up(&qhp->wait);
908 flush_qp(qhp, &flag);
910 spin_unlock_irqrestore(&qhp->lock, flag);
913 iwch_post_terminate(qhp, NULL);
930 PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
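
iwch_modify_qp (lines 720-930) is the QP state machine. Under qhp->lock it accepts the new attributes, then switches on the current state: IDLE to RTS stores the MPA attributes and the endpoint, pins the ep, and calls rdma_init with the lock dropped; from RTS the next state (CLOSING, TERMINATE, or ERROR) stops the WQ and hands the close or abort to the ep; CLOSING back to IDLE clears the stream handle, drops the ep reference, and wakes waiters; lines 807-808 and 897-913 show the QP being moved to ERROR, flushed, and possibly sent a TERMINATE. A skeletal sketch of the transition handling, with the structure inferred from the matched lines and all error paths and debug output elided:

    spin_lock_irqsave(&qhp->lock, flag);
    qhp->attr = newattr;                             /* non-state attributes accepted */

    switch (qhp->attr.state) {
    case IWCH_QP_STATE_IDLE:
            /* IDLE -> RTS: bind the connection, then program the firmware */
            qhp->attr.mpa_attr = attrs->mpa_attr;
            qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
            qhp->ep = qhp->attr.llp_stream_handle;
            qhp->attr.state = IWCH_QP_STATE_RTS;
            get_ep(&qhp->ep->com);                   /* QP holds the ep while in RTS */
            spin_unlock_irqrestore(&qhp->lock, flag);
            ret = rdma_init(rhp, qhp, mask, attrs);  /* may sleep, so done unlocked */
            spin_lock_irqsave(&qhp->lock, flag);
            break;
    case IWCH_QP_STATE_RTS:
            switch (attrs->next_state) {
            case IWCH_QP_STATE_CLOSING:              /* graceful close via the ep */
                    qhp->attr.state = IWCH_QP_STATE_CLOSING;
                    ep = qhp->ep;
                    break;
            case IWCH_QP_STATE_TERMINATE:            /* send a TERMINATE, stop the WQ */
                    qhp->attr.state = IWCH_QP_STATE_TERMINATE;
                    if (t3b_device(qhp->rhp))
                            cxio_set_wq_in_error(&qhp->wq);
                    break;
            case IWCH_QP_STATE_ERROR:                /* abortive close */
                    qhp->attr.state = IWCH_QP_STATE_ERROR;
                    ep = qhp->ep;
                    break;
            }
            break;
    case IWCH_QP_STATE_CLOSING:
            /* close completed: back to IDLE, drop the ep, wake waiters */
            qhp->attr.state = IWCH_QP_STATE_IDLE;
            qhp->attr.llp_stream_handle = NULL;
            put_ep(&qhp->ep->com);
            qhp->ep = NULL;
            wake_up(&qhp->wait);
            break;
    }
    spin_unlock_irqrestore(&qhp->lock, flag);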
934 static int quiesce_qp(struct iwch_qp *qhp)
936 spin_lock_irq(&qhp->lock);
937 iwch_quiesce_tid(qhp->ep);
938 qhp->flags |= QP_QUIESCED;
939 spin_unlock_irq(&qhp->lock);
943 static int resume_qp(struct iwch_qp *qhp)
945 spin_lock_irq(&qhp->lock);
946 iwch_resume_tid(qhp->ep);
947 qhp->flags &= ~QP_QUIESCED;
948 spin_unlock_irq(&qhp->lock);
955 struct iwch_qp *qhp;
958 qhp = get_qhp(chp->rhp, i);
959 if (!qhp)
961 if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
962 quiesce_qp(qhp);
965 if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
966 quiesce_qp(qhp);
974 struct iwch_qp *qhp;
977 qhp = get_qhp(chp->rhp, i);
978 if (!qhp)
980 if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
981 resume_qp(qhp);
984 if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
985 resume_qp(qhp);
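
The last group (lines 934-985) is the quiesce/resume machinery. quiesce_qp and resume_qp toggle QP_QUIESCED under qhp->lock while quiescing or resuming the TID; the per-CQ walkers scan every QP in the device and (un)quiesce those whose receive or send CQ matches the CQ being serviced. A minimal sketch of the quiesce walker, assuming an outer loop over the device's QP table (the loop bound is not in the matched lines):

    for (i = 0; i < T3_MAX_NUM_QP; i++) {            /* bound assumed */
            qhp = get_qhp(chp->rhp, i);
            if (!qhp)
                    continue;
            /* quiesce at most once if either of the QP's CQs is this CQ */
            if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
                    quiesce_qp(qhp);
                    continue;
            }
            if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
                    quiesce_qp(qhp);
    }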