Lines Matching refs:chp

242 struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
243 struct t4_cq *cq = &chp->cq;
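These first two hits are locals in c4iw_flush_sq(): the driver recovers its own CQ wrapper from the QP's send completion queue. As in most verbs providers, to_c4iw_cq() is a container_of() conversion from the embedded struct ib_cq back to the enclosing struct c4iw_cq. A minimal userspace sketch of that pattern; the struct layouts here are illustrative stand-ins, not the driver's real definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_cq { int cqe; };                      /* stand-in for the core struct */
    struct my_cq { int cqid; struct ib_cq ibcq; };  /* stand-in for struct c4iw_cq */

    static struct my_cq *to_my_cq(struct ib_cq *ibcq)
    {
        return container_of(ibcq, struct my_cq, ibcq);
    }

    int main(void)
    {
        struct my_cq cq = { .cqid = 7 };
        /* Recover the wrapper from a pointer to its embedded member. */
        printf("cqid = %d\n", to_my_cq(&cq.ibcq)->cqid);
        return 0;
    }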
335 void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
342 pr_debug("cqid 0x%x\n", chp->cq.cqid);
343 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
351 qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
407 flush_completed_wrs(&qhp->wq, &chp->cq);
409 swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
412 t4_swcq_produce(&chp->cq);
415 t4_hwcq_consume(&chp->cq);
416 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
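Lines 335-416 are the body of c4iw_flush_hw_cq(): it walks the pending hardware CQEs, flushes completed work requests for the owning QP, copies each entry into the next software-queue slot, then advances the software producer index and consumes the hardware entry. A hypothetical userspace sketch of that drain loop; the ring layout, size, and helper names are invented stand-ins for the t4_* helpers:

    #include <stdio.h>

    #define RING_SIZE 8

    struct cqe { unsigned int wrid; };

    struct ring {
        struct cqe q[RING_SIZE];
        unsigned int pidx, cidx;    /* producer / consumer indices */
    };

    static int ring_empty(const struct ring *r) { return r->pidx == r->cidx; }
    static void produce(struct ring *r) { r->pidx = (r->pidx + 1) % RING_SIZE; }
    static void consume(struct ring *r) { r->cidx = (r->cidx + 1) % RING_SIZE; }

    int main(void)
    {
        struct ring hw = { .pidx = 3 }, sw = { 0 };     /* 3 pending HW CQEs */

        hw.q[0].wrid = 10; hw.q[1].wrid = 11; hw.q[2].wrid = 12;

        while (!ring_empty(&hw)) {          /* t4_next_hw_cqe() analogue */
            sw.q[sw.pidx] = hw.q[hw.cidx];  /* copy CQE into SW slot     */
            produce(&sw);                   /* t4_swcq_produce()         */
            consume(&hw);                   /* t4_hwcq_consume()         */
        }
        printf("moved %u entries\n", sw.pidx);
        return 0;
    }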
754 static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
764 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
921 static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
928 ret = t4_next_cqe(&chp->cq, &rd_cqe);
933 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
939 ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
944 ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
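Lines 754-944 cover the poll path: c4iw_poll_cq_one() peeks the next CQE with t4_next_cqe(), resolves the owning QP from the CQE's QPID via get_qhp(), and calls __c4iw_poll_cq_one() either with that QP (under its lock) or with NULL when the QP has already been destroyed. A toy sketch of that dispatch, with an invented lookup table standing in for the QPID xarray:

    #include <stdio.h>

    struct qp  { int qpid; };
    struct cqe { int qpid; };

    /* Toy QPID -> QP table standing in for get_qhp()'s lookup. */
    static struct qp *lookup_qp(struct qp **tbl, int n, int qpid)
    {
        return (qpid >= 0 && qpid < n) ? tbl[qpid] : NULL;
    }

    static void poll_one(struct qp **tbl, int n, const struct cqe *cqe)
    {
        struct qp *qhp = lookup_qp(tbl, n, cqe->qpid);

        if (qhp) {
            /* real driver: lock the QP, then __c4iw_poll_cq_one(chp, qhp, ...) */
            printf("polled CQE for live QP %d\n", qhp->qpid);
        } else {
            /* QP already gone: poll the CQE without a QP reference */
            printf("polled CQE for stale QPID %d\n", cqe->qpid);
        }
    }

    int main(void)
    {
        struct qp q1 = { .qpid = 1 };
        struct qp *tbl[2] = { NULL, &q1 };
        struct cqe a = { .qpid = 1 }, b = { .qpid = 0 };

        poll_one(tbl, 2, &a);
        poll_one(tbl, 2, &b);
        return 0;
    }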
951 struct c4iw_cq *chp;
956 chp = to_c4iw_cq(ibcq);
958 spin_lock_irqsave(&chp->lock, flags);
961 err = c4iw_poll_cq_one(chp, wc + npolled);
966 spin_unlock_irqrestore(&chp->lock, flags);
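c4iw_poll_cq() (lines 951-966) is the standard provider poll loop: take the CQ lock once, pull up to num_entries completions, stop early on error or an empty queue, and return the count. A runnable userspace analogue, with a pthread mutex standing in for spin_lock_irqsave() and invented helpers:

    #include <pthread.h>
    #include <stdio.h>

    struct cq { pthread_mutex_t lock; int pending; };

    /* Returns 0 on success, -1 (think -EAGAIN) when the queue is empty. */
    static int poll_one(struct cq *cq) { return cq->pending-- > 0 ? 0 : -1; }

    static int poll_cq(struct cq *cq, int num_entries)
    {
        int npolled, err = 0;

        pthread_mutex_lock(&cq->lock);
        for (npolled = 0; npolled < num_entries; npolled++) {
            err = poll_one(cq);
            if (err)
                break;
        }
        pthread_mutex_unlock(&cq->lock);
        /* empty queue is not an error; report how many we got */
        return (!err || err == -1) ? npolled : err;
    }

    int main(void)
    {
        struct cq cq = { PTHREAD_MUTEX_INITIALIZER, 2 };
        printf("polled %d completions\n", poll_cq(&cq, 4));  /* prints 2 */
        return 0;
    }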
970 void c4iw_cq_rem_ref(struct c4iw_cq *chp)
972 if (refcount_dec_and_test(&chp->refcnt))
973 complete(&chp->cq_rel_comp);
978 struct c4iw_cq *chp;
982 chp = to_c4iw_cq(ib_cq);
984 xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
985 c4iw_cq_rem_ref(chp);
986 wait_for_completion(&chp->cq_rel_comp);
990 destroy_cq(&chp->rhp->rdev, &chp->cq,
991 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
992 chp->destroy_skb, chp->wr_waitp);
993 c4iw_put_wr_wait(chp->wr_waitp);
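Lines 970-993 are the teardown: c4iw_cq_rem_ref() drops a reference and completes cq_rel_comp when the count hits zero, while c4iw_destroy_cq() first erases the CQ from the rhp->cqs xarray so no new lookups can find it, drops its own reference, and blocks in wait_for_completion() until every in-flight user is gone before issuing the firmware destroy. A runnable userspace analogue of that pattern, with stdatomic and a pthread condvar standing in for refcount_t and struct completion:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    struct cq {
        atomic_int refcnt;
        struct completion rel_comp;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void cq_rem_ref(struct cq *cq)
    {
        /* Last reference out signals anyone blocked in destroy. */
        if (atomic_fetch_sub(&cq->refcnt, 1) == 1)
            complete(&cq->rel_comp);
    }

    static void *user(void *arg)
    {
        cq_rem_ref(arg);    /* e.g. an event handler finishing up */
        return NULL;
    }

    int main(void)
    {
        struct cq cq = {
            .refcnt = 2,    /* destroy path + one in-flight user */
            .rel_comp = { PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, 0 },
        };
        pthread_t t;

        pthread_create(&t, NULL, user, &cq);
        cq_rem_ref(&cq);                    /* destroy drops its own ref ... */
        wait_for_completion(&cq.rel_comp);  /* ... and waits for the rest   */
        pthread_join(&t, NULL);
        puts("safe to free the CQ");
        return 0;
    }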
1004 struct c4iw_cq *chp = to_c4iw_cq(ibcq);
1028 chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
1029 if (!chp->wr_waitp) {
1033 c4iw_init_wr_wait(chp->wr_waitp);
1036 chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
1037 if (!chp->destroy_skb) {
1066 (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
1074 chp->cq.size = hwentries;
1075 chp->cq.memsize = memsize;
1076 chp->cq.vector = vector;
1078 ret = create_cq(&rhp->rdev, &chp->cq,
1080 chp->wr_waitp);
1084 chp->rhp = rhp;
1085 chp->cq.size--; /* status page */
1086 chp->ibcq.cqe = entries - 2;
1087 spin_lock_init(&chp->lock);
1088 spin_lock_init(&chp->comp_handler_lock);
1089 refcount_set(&chp->refcnt, 1);
1090 init_completion(&chp->cq_rel_comp);
1091 ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
1106 uresp.cqid = chp->cq.cqid;
1107 uresp.size = chp->cq.size;
1108 uresp.memsize = chp->cq.memsize;
1128 mm->addr = virt_to_phys(chp->cq.queue);
1129 mm->len = chp->cq.memsize;
1133 mm2->addr = chp->cq.bar2_pa;
1138 pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n",
1139 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
1140 &chp->cq.dma_addr);
1147 xa_erase_irq(&rhp->cqs, chp->cq.cqid);
1149 destroy_cq(&chp->rhp->rdev, &chp->cq,
1151 chp->destroy_skb, chp->wr_waitp);
1153 kfree_skb(chp->destroy_skb);
1155 c4iw_put_wr_wait(chp->wr_waitp);
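Lines 1004-1155 span c4iw_create_cq(): allocate the wait object and a pre-allocated destroy skb up front, size the hardware queue (the status page consumes one slot, hence cq.size-- and ibcq.cqe = entries - 2), issue create_cq() to the firmware, register the CQ in the xarray, and copy the uresp/mmap details to userspace; lines 1147-1155 are the matching unwind. A sketch of that goto-based error unwinding, with malloc/free standing in for the driver's allocators and an invented failure knob:

    #include <stdio.h>
    #include <stdlib.h>

    static int create_cq_sketch(int fail_step)
    {
        void *wr_wait, *skb;

        wr_wait = malloc(16);           /* c4iw_alloc_wr_wait() */
        if (!wr_wait)
            goto err_out;

        skb = malloc(64);               /* alloc_skb(wr_len, GFP_KERNEL) */
        if (!skb)
            goto err_free_wr_wait;

        if (fail_step)                  /* e.g. create_cq() or xa_insert_irq() fails */
            goto err_free_skb;

        printf("cq created\n");
        return 0;

    err_free_skb:
        free(skb);                      /* kfree_skb(chp->destroy_skb)      */
    err_free_wr_wait:
        free(wr_wait);                  /* c4iw_put_wr_wait(chp->wr_waitp)  */
    err_out:
        return -1;
    }

    int main(void)
    {
        create_cq_sketch(0);    /* success path */
        create_cq_sketch(1);    /* failure after both allocations: reverse-order cleanup */
        return 0;
    }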
1162 struct c4iw_cq *chp;
1166 chp = to_c4iw_cq(ibcq);
1167 spin_lock_irqsave(&chp->lock, flag);
1168 t4_arm_cq(&chp->cq,
1171 ret = t4_cq_notempty(&chp->cq);
1172 spin_unlock_irqrestore(&chp->lock, flag);
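Finally, lines 1162-1172 are c4iw_arm_cq(), the req_notify_cq hook: under the CQ lock it arms the doorbell for solicited-only or any completion, and when the caller passes IB_CQ_REPORT_MISSED_EVENTS it returns whether CQEs are already pending (t4_cq_notempty()), letting the consumer re-poll instead of missing an event. A hypothetical sketch; the flag names mirror the verbs API, but the ring state and helpers are invented:

    #include <stdio.h>

    #define CQ_SOLICITED      (1 << 0)
    #define CQ_NEXT_COMP      (1 << 1)
    #define CQ_REPORT_MISSED  (1 << 2)

    struct cq { int armed_mode; int pending; };

    static int arm_cq(struct cq *cq, int flags)
    {
        int ret = 0;

        /* real driver: spin_lock_irqsave(&chp->lock, flag); */
        cq->armed_mode = (flags & CQ_SOLICITED) ? CQ_SOLICITED : CQ_NEXT_COMP;
        if (flags & CQ_REPORT_MISSED)
            ret = cq->pending > 0;      /* t4_cq_notempty() analogue */
        /* real driver: spin_unlock_irqrestore(&chp->lock, flag); */
        return ret;
    }

    int main(void)
    {
        struct cq cq = { 0, 1 };        /* one CQE already queued */
        printf("missed events: %d\n",
               arm_cq(&cq, CQ_NEXT_COMP | CQ_REPORT_MISSED));  /* prints 1 */
        return 0;
    }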