Lines Matching refs:cq

52 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
76 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
77 res->u.cq.op = FW_RI_RES_OP_RESET;
78 res->u.cq.iqid = cpu_to_be32(cq->cqid);
86 kfree(cq->sw_queue);
88 cq->memsize, cq->queue,
89 dma_unmap_addr(cq, mapping));
90 c4iw_put_cqid(rdev, cq->cqid, uctx);
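
The destroy path above posts a FW_RI_RES_TYPE_CQ / FW_RI_RES_OP_RESET work request so the firmware releases the ingress queue, then frees the software shadow queue and the DMA ring, and finally returns the cqid. A minimal standalone sketch of that ordering; struct sketch_cq and sketch_destroy_cq are illustrative names, not the driver's API:

#include <stdlib.h>

/* Stand-ins for the host-side CQ state torn down above. */
struct sketch_cq {
    void *queue;     /* models the dma_alloc_coherent() hardware ring */
    void *sw_queue;  /* models the kzalloc'd software shadow queue    */
    unsigned cqid;   /* models the id from c4iw_get_cqid()            */
};

static void sketch_destroy_cq(struct sketch_cq *cq)
{
    /* 1. the firmware RESET work request completes here (elided)  */
    /* 2. release the host-side resources and return the cqid      */
    free(cq->sw_queue);
    free(cq->queue);          /* dma_free_coherent() in the driver */
    cq->cqid = 0;             /* c4iw_put_cqid() in the driver     */
}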
95 create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
109 cq->cqid = c4iw_get_cqid(rdev, uctx);
110 if (!cq->cqid) {
116 cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
117 if (!cq->sw_queue) {
122 cq->queue = dma_alloc_coherent(rhp->ibdev.dma_device, cq->memsize,
123 &cq->dma_addr, GFP_KERNEL);
124 if (!cq->queue) {
128 dma_unmap_addr_set(cq, mapping, cq->dma_addr);
129 memset(cq->queue, 0, cq->memsize);
147 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
148 res->u.cq.op = FW_RI_RES_OP_WRITE;
149 res->u.cq.iqid = cpu_to_be32(cq->cqid);
151 res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
156 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
162 res->u.cq.iqsize = cpu_to_be16(cq->size);
163 res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
174 cq->gen = 1;
175 cq->rdev = rdev;
178 t4_bar2_sge_qregs(rdev->adap, cq->cqid, T4_BAR2_QTYPE_INGRESS, user,
179 &cq_bar2_qoffset, &cq->bar2_qid);
185 cq->bar2_pa = (rdev->bar2_pa + cq_bar2_qoffset) & PAGE_MASK;
187 cq->bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
192 dma_free_coherent(rhp->ibdev.dma_device, cq->memsize, cq->queue,
193 dma_unmap_addr(cq, mapping));
195 kfree(cq->sw_queue);
197 c4iw_put_cqid(rdev, cq->cqid, uctx);
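
create_cq() acquires its resources in a fixed order (cqid, software queue, DMA ring, firmware FW_RI_RES_OP_WRITE command, BAR2 doorbell mapping), and the error-path lines above undo exactly what was acquired, in reverse. A hedged sketch of that goto-unwind ladder; every sketch_* name is a placeholder, not the driver's API:

#include <stdlib.h>

struct sketch_cq {
    void *queue;
    void *sw_queue;
    unsigned cqid;
    unsigned gen;                          /* generation bit starts at 1 */
};

static int sketch_create_cq(struct sketch_cq *cq, size_t memsize)
{
    cq->cqid = 1;                          /* c4iw_get_cqid() stand-in   */
    if (!cq->cqid)
        return -1;
    cq->sw_queue = calloc(1, memsize);     /* kzalloc(..., GFP_KERNEL)   */
    if (!cq->sw_queue)
        goto err_put_cqid;
    cq->queue = calloc(1, memsize);        /* dma_alloc_coherent()       */
    if (!cq->queue)
        goto err_free_swq;
    /* The FW_RI_RES_OP_WRITE work request programs iqid, iqsize and
     * iqaddr into hardware; a failure there would unwind through an
     * extra label that frees cq->queue first (elided).                  */
    cq->gen = 1;                           /* first lap expects genbit 1 */
    return 0;

err_free_swq:
    free(cq->sw_queue);
err_put_cqid:
    cq->cqid = 0;                          /* c4iw_put_cqid()            */
    return -1;
}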
202 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
206 CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
207 cq, cq->sw_cidx, cq->sw_pidx);
214 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
215 cq->sw_queue[cq->sw_pidx] = cqe;
216 t4_swcq_produce(cq);
219 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
225 CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
226 __func__, wq, cq, wq->rq.in_use, count);
228 insert_recv_cqe(wq, cq);
234 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
239 CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
240 cq, cq->sw_cidx, cq->sw_pidx);
248 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
249 cq->sw_queue[cq->sw_pidx] = cqe;
250 t4_swcq_produce(cq);
260 struct t4_cq *cq = &chp->cq;
272 insert_sq_cqe(wq, cq, swsqe);
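
insert_recv_cqe() and insert_sq_cqe() both synthesize a flush completion, stamp it with the current generation bit (V_CQE_GENBIT places it in bit 63 of bits_type_ts), store it at the software producer index, and advance that index; c4iw_flush_rq() repeats the recv variant once per still-in-use RQ entry beyond the already-polled count. A self-contained model of produce-and-flush; the sketch_* names are stand-ins:

struct sketch_cqe { unsigned long long bits_type_ts; };

struct sketch_swcq {
    struct sketch_cqe *sw_queue;
    unsigned size, sw_pidx, sw_in_use, gen;
};

static void sketch_insert_flush_cqe(struct sketch_swcq *cq)
{
    struct sketch_cqe cqe = { 0 };

    /* stamp the synthetic entry with the current generation bit */
    cqe.bits_type_ts = (unsigned long long)cq->gen << 63;

    /* t4_swcq_produce(): store at sw_pidx, advance with wrap */
    cq->sw_queue[cq->sw_pidx] = cqe;
    cq->sw_in_use++;
    if (++cq->sw_pidx == cq->size)
        cq->sw_pidx = 0;
}

static int sketch_flush_rq(struct sketch_swcq *cq, unsigned in_use,
    unsigned count)
{
    int flushed = 0;
    int todo = (int)in_use - (int)count;   /* skip already-polled WRs */

    while (todo-- > 0) {
        sketch_insert_flush_cqe(cq);
        flushed++;
    }
    return flushed;
}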
287 static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
310 "%s moving cqe into swcq sq idx %u cq idx %u\n",
311 __func__, cidx, cq->sw_pidx);
313 cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
314 t4_swcq_produce(cq);
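
flush_completed_wrs() walks the send queue from its consumer index and, for each work request the hardware has already completed, moves the saved CQE into the software CQ, stopping at the first incomplete entry so completions stay in submission order. A sketch under the assumption of a per-slot complete flag and saved cqe (the driver also tags moved entries with a SWCQE header bit, elided here):

struct sketch_cqe { unsigned long long bits_type_ts; };

struct sketch_swcq {
    struct sketch_cqe *sw_queue;
    unsigned size, sw_pidx, sw_in_use;
};

struct sketch_swsqe {
    int complete;               /* hardware finished this WR already */
    struct sketch_cqe cqe;      /* completion captured when it arrived */
};

static void sketch_flush_completed_wrs(struct sketch_swsqe *sw_sq,
    unsigned sq_size, unsigned *sq_cidx, unsigned *sq_in_use,
    struct sketch_swcq *cq)
{
    while (*sq_in_use) {
        struct sketch_swsqe *swsqe = &sw_sq[*sq_cidx];

        if (!swsqe->complete)
            break;                        /* preserve submission order */

        /* move the saved CQE into the swcq (t4_swcq_produce()) */
        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
        cq->sw_in_use++;
        if (++cq->sw_pidx == cq->size)
            cq->sw_pidx = 0;

        swsqe->complete = 0;
        if (++(*sq_cidx) == sq_size)
            *sq_cidx = 0;
        (*sq_in_use)--;
    }
}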
366 CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, &chp->cq,
367 chp->cq.cqid);
368 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
425 flush_completed_wrs(&qhp->wq, &chp->cq);
427 swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
430 t4_swcq_produce(&chp->cq);
433 t4_hwcq_consume(&chp->cq);
434 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
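
c4iw_flush_hw_cq() drains valid hardware CQEs into the software queue. Validity is the generation-bit handshake: t4_next_hw_cqe() accepts an entry only if its genbit matches the queue's expected gen, and t4_hwcq_consume() flips the expectation each time cidx wraps, so entries left over from the previous lap are never re-read. A standalone model of that pair (bit 63 as the genbit, per V_CQE_GENBIT; names illustrative):

struct sketch_cqe { unsigned long long bits_type_ts; };

struct sketch_hwcq {
    struct sketch_cqe *queue;   /* DMA ring written by hardware */
    unsigned size, cidx, gen;   /* gen starts at 1 after create */
};

static int sketch_next_hw_cqe(struct sketch_hwcq *cq, struct sketch_cqe **cqep)
{
    unsigned genbit = (unsigned)(cq->queue[cq->cidx].bits_type_ts >> 63) & 1;

    if (genbit != cq->gen)
        return 0;               /* hardware has not written it yet */
    *cqep = &cq->queue[cq->cidx];
    return 1;
}

static void sketch_hwcq_consume(struct sketch_hwcq *cq)
{
    if (++cq->cidx == cq->size) {
        cq->cidx = 0;
        cq->gen ^= 1;           /* next lap expects the other genbit */
    }
}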
454 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
461 ptr = cq->sw_cidx;
462 while (ptr != cq->sw_pidx) {
463 cqe = &cq->sw_queue[ptr];
467 if (++ptr == cq->size)
470 CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
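
c4iw_count_rcqes() is a read-only walk from the software consumer index to the producer index, counting receive completions owned by one work queue and wrapping at cq->size. Rendered as standalone C; the is_recv and qpid fields below stand in for the RQ_TYPE() and CQE_QPID() header decodes:

struct sketch_cqe { int is_recv; unsigned qpid; };

struct sketch_swcq {
    struct sketch_cqe *sw_queue;
    unsigned size, sw_cidx, sw_pidx;
};

static int sketch_count_rcqes(const struct sketch_swcq *cq, unsigned qpid)
{
    int count = 0;
    unsigned ptr = cq->sw_cidx;

    while (ptr != cq->sw_pidx) {
        const struct sketch_cqe *cqe = &cq->sw_queue[ptr];

        if (cqe->is_recv && cqe->qpid == qpid)
            count++;
        if (++ptr == cq->size)
            ptr = 0;            /* ring wrap */
    }
    return count;
}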
480 * credit: cq credit to return to sge.
489 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
497 ret = t4_next_cqe(cq, &hw_cqe);
689 flush_completed_wrs(wq, cq);
693 CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
694 __func__, cq, cq->cqid, cq->sw_cidx);
695 t4_swcq_consume(cq);
697 CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
698 __func__, cq, cq->cqid, cq->cidx);
699 t4_hwcq_consume(cq);
705 * Get one cq entry from c4iw and map it to openib.
723 ret = t4_next_cqe(&chp->cq, &rd_cqe);
735 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
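
t4_next_cqe() gives the software queue priority: synthesized flush completions parked there are returned before any fresh hardware entry, and after processing, poll_cq() consumes from whichever ring supplied the CQE (the "skip sw cqe" / "skip hw cqe" trace lines above). The driver distinguishes the two by a SWCQE header bit; the last_was_sw flag below is a simplification of that. A model:

struct sketch_cqe { unsigned long long bits_type_ts; };

struct sketch_cq {
    struct sketch_cqe *queue;       /* hardware DMA ring     */
    struct sketch_cqe *sw_queue;    /* software shadow queue */
    unsigned size, cidx, gen;
    unsigned sw_size, sw_cidx, sw_in_use;
    int last_was_sw;
};

static int sketch_next_cqe(struct sketch_cq *cq, struct sketch_cqe **cqep)
{
    if (cq->sw_in_use) {            /* flushed completions first */
        *cqep = &cq->sw_queue[cq->sw_cidx];
        cq->last_was_sw = 1;
        return 1;
    }
    if (((unsigned)(cq->queue[cq->cidx].bits_type_ts >> 63) & 1) == cq->gen) {
        *cqep = &cq->queue[cq->cidx];   /* valid hardware entry */
        cq->last_was_sw = 0;
        return 1;
    }
    return 0;                       /* nothing to poll */
}

static void sketch_cqe_consume(struct sketch_cq *cq)
{
    if (cq->last_was_sw) {          /* t4_swcq_consume() */
        cq->sw_in_use--;
        if (++cq->sw_cidx == cq->sw_size)
            cq->sw_cidx = 0;
    } else if (++cq->cidx == cq->size) {    /* t4_hwcq_consume() */
        cq->cidx = 0;
        cq->gen ^= 1;
    }
}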
892 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
898 destroy_cq(&chp->rhp->rdev, &chp->cq,
899 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
948 memsize = hwentries * sizeof *chp->cq.queue;
951 * memsize must be a multiple of the page size if it's a user cq.
955 chp->cq.size = hwentries;
956 chp->cq.memsize = memsize;
957 chp->cq.vector = vector;
959 ret = create_cq(&rhp->rdev, &chp->cq,
965 chp->cq.size--; /* status page */
971 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
986 uresp.cqid = chp->cq.cqid;
987 uresp.size = chp->cq.size;
988 uresp.memsize = chp->cq.memsize;
1001 mm->addr = vtophys(chp->cq.queue);
1002 mm->len = chp->cq.memsize;
1006 mm2->addr = chp->cq.bar2_pa;
1012 __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
1013 (unsigned long long) chp->cq.dma_addr);
1020 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
1022 destroy_cq(&chp->rhp->rdev, &chp->cq,
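
At the verbs level, c4iw_create_cq() sizes the ring as hwentries * sizeof(*chp->cq.queue), rounds up to a page multiple for user CQs (the queue and the BAR2 doorbell page are handed to userspace as the mm/mm2 mmap entries above), and subtracts one entry after creation because the last slot holds the status page; c4iw_destroy_cq() drops the cqidr handle and reverses the whole thing via destroy_cq(). A sketch of just the sizing arithmetic; both constants are illustrative, not pulled from the headers:

#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_CQE_SIZE  32u    /* stands in for sizeof(*chp->cq.queue) */

static size_t sketch_cq_memsize(unsigned hwentries, int user_cq)
{
    size_t memsize = (size_t)hwentries * SKETCH_CQE_SIZE;

    if (user_cq)                /* user mappings must cover whole pages */
        memsize = (memsize + SKETCH_PAGE_SIZE - 1) &
            ~((size_t)SKETCH_PAGE_SIZE - 1);
    return memsize;
}

/* After create_cq() succeeds the driver does chp->cq.size--,
 * reserving the final slot for the status page. */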
1028 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
1041 t4_arm_cq(&chp->cq,
1044 ret = t4_cq_notempty(&chp->cq);
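
c4iw_arm_cq() rings the CQ doorbell to request the next (or next solicited-only) event; when the caller passes IB_CQ_REPORT_MISSED_EVENTS, it also returns whether the queue is already non-empty so the consumer polls again instead of sleeping on an event that has already fired. t4_cq_notempty() is exactly that check: pending software CQEs, or a valid hardware CQE at cidx. A model using the same bit-63 genbit convention as the sketches above (names illustrative):

struct sketch_cqe { unsigned long long bits_type_ts; };

struct sketch_cq {
    struct sketch_cqe *queue;
    unsigned size, cidx, gen, sw_in_use;
};

static int sketch_cq_notempty(const struct sketch_cq *cq)
{
    return cq->sw_in_use != 0 ||
        ((unsigned)(cq->queue[cq->cidx].bits_type_ts >> 63) & 1) == cq->gen;
}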