Lines Matching refs:cq (each hit below is prefixed with its line number in the source file; the code appears to be the mthca InfiniBand completion-queue driver, mthca_cq.c)

169 static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
171 return get_cqe_from_buf(&cq->buf, entry);
179 static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
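
The hit at line 181 relies on an indexing convention used throughout this file (see also line 776 below, where cq->ibcq.cqe is set to nent - 1): the consumer index is a free-running 32-bit counter, and because the CQ depth is always a power of two, ANDing the counter with ibcq.cqe yields the ring slot. A minimal stand-alone sketch of that masking, with invented values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t nent = 256;		/* CQ depth; always a power of two     */
	const uint32_t cqe  = nent - 1;		/* what mthca keeps in cq->ibcq.cqe    */
	uint32_t cons_index = 0xfffffffeu;	/* free-running, never wrapped by hand */
	int i;

	/* Masking with nent - 1 turns the free-running counter into a ring slot. */
	for (i = 0; i < 4; ++i, ++cons_index)
		printf("cons_index = 0x%08x -> slot %u\n",
		       (unsigned) cons_index, (unsigned) (cons_index & cqe));

	assert((cons_index & cqe) < nent);
	return 0;
}
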
201 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
204 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
208 *cq->set_ci_db = cpu_to_be32(cq->cons_index);
211 mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
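
Lines 204-211 show the two ways the updated consumer index reaches the HCA: on Arbel (mem-free) hardware the value is written to a doorbell record in host memory, while on Tavor it is posted through a 64-bit doorbell write that carries incr - 1, which is why the comment at line 201 notes that incr is ignored in the mem-free case. The sketch below models that split in user space; struct cq_model, the stubbed doorbell_write64() helper, and the TAVOR_CQ_DB_INC_CI placeholder bit are invented simplifications, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>			/* htonl() stands in for cpu_to_be32() */

#define TAVOR_CQ_DB_INC_CI (1u << 24)	/* placeholder bit, not the real constant */

struct cq_model {
	uint32_t cons_index;
	uint32_t cqn;
	uint32_t set_ci_db;		/* models the Arbel set_ci doorbell record */
	int is_memfree;			/* 1 = Arbel (mem-free), 0 = Tavor */
};

/* Stand-in for mthca_write64(); the real driver writes a BAR register. */
static void doorbell_write64(uint32_t hi, uint32_t lo)
{
	printf("doorbell write: hi=0x%08x lo=0x%08x\n", (unsigned) hi, (unsigned) lo);
}

/* Models update_cons_index(): incr only matters on the Tavor path. */
static void update_cons_index(struct cq_model *cq, int incr)
{
	if (cq->is_memfree)
		cq->set_ci_db = htonl(cq->cons_index);	/* big-endian DB record */
	else
		doorbell_write64(TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1);
}

int main(void)
{
	struct cq_model arbel = { .cons_index = 17, .cqn = 5, .is_memfree = 1 };
	struct cq_model tavor = { .cons_index = 17, .cqn = 5, .is_memfree = 0 };

	update_cons_index(&arbel, 3);	/* incr ignored: only cons_index matters */
	update_cons_index(&tavor, 3);	/* doorbell asks the HCA to advance by 3 */
	printf("arbel db record = 0x%08x\n", (unsigned) arbel.set_ci_db);
	return 0;
}
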
219 struct mthca_cq *cq;
221 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
223 if (!cq) {
228 ++cq->arm_sn;
230 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
236 struct mthca_cq *cq;
241 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
242 if (cq)
243 ++cq->refcount;
247 if (!cq) {
254 event.element.cq = &cq->ibcq;
255 if (cq->ibcq.event_handler)
256 cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
259 if (!--cq->refcount)
260 wake_up(&cq->wait);
273 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
280 spin_lock_irq(&cq->lock);
289 for (prod_index = cq->cons_index;
290 cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
292 if (prod_index == cq->cons_index + cq->ibcq.cqe)
297 qpn, cq->cqn, cq->cons_index, prod_index);
303 while ((int) --prod_index - (int) cq->cons_index >= 0) {
304 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
310 memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
316 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
318 cq->cons_index += nfreed;
319 update_cons_index(dev, cq, nfreed);
322 spin_unlock_irq(&cq->lock);
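
The mthca_cq_clean() hits (273-322) outline how completions belonging to one QP are purged under the CQ lock: scan forward from cons_index to find the end of the software-owned region, then walk backwards, counting CQEs that match the QP and sliding the others up by that count, and finally hand the freed slots back to hardware and bump cons_index. Below is a stripped-down user-space model of that backward compaction over a power-of-two ring; struct cqe_model and its sw_owned flag are invented for illustration and do not match the driver's CQE layout.

#include <stdint.h>
#include <stdio.h>

#define CQ_SIZE 8				/* ring depth, a power of two */

struct cqe_model {
	uint32_t qpn;		/* QP this completion belongs to          */
	int sw_owned;		/* 1 = valid for software, 0 = back to HW */
};

static struct cqe_model buf[CQ_SIZE];
static uint32_t cons_index;
static const uint32_t mask = CQ_SIZE - 1;	/* plays the role of cq->ibcq.cqe */

/* Drop every CQE that belongs to qpn, sliding the survivors up the ring. */
static void cq_clean(uint32_t qpn)
{
	uint32_t prod_index, i;
	int nfreed = 0;

	/* Find one past the last software-owned CQE (bounded by the ring size). */
	for (prod_index = cons_index;
	     buf[prod_index & mask].sw_owned;
	     ++prod_index)
		if (prod_index == cons_index + CQ_SIZE)
			break;

	/* Walk backwards: count matches, move everything else up by nfreed. */
	while ((int) --prod_index - (int) cons_index >= 0) {
		struct cqe_model *cqe = &buf[prod_index & mask];

		if (cqe->qpn == qpn)
			++nfreed;
		else if (nfreed)
			buf[(prod_index + nfreed) & mask] = *cqe;
	}

	/* Return the freed slots to "hardware" and advance the consumer index. */
	for (i = 0; i < (uint32_t) nfreed; ++i)
		buf[(cons_index + i) & mask].sw_owned = 0;
	cons_index += nfreed;
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < 4; ++i)			/* completions for QPs 7, 9, 7, 9 */
		buf[i] = (struct cqe_model) { .qpn = (i & 1) ? 9 : 7, .sw_owned = 1 };

	cq_clean(7);				/* purge QP 7's completions */
	printf("cons_index = %u\n", (unsigned) cons_index);
	for (i = 0; i < CQ_SIZE; ++i)
		printf("slot %u: qpn=%u sw=%d\n", (unsigned) i,
		       (unsigned) buf[i].qpn, buf[i].sw_owned);
	return 0;
}
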
325 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
335 if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
336 cq->ibcq.cqe < cq->resize_buf->cqe) {
337 cq->cons_index &= cq->ibcq.cqe;
338 if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
339 cq->cons_index -= cq->ibcq.cqe + 1;
342 for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
343 memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
344 i & cq->resize_buf->cqe),
345 get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
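
mthca_cq_resize_copy_cqes() (lines 325-345) copies every still-valid CQE from the old ring into the resize buffer; the two rings can have different depths, so each side is indexed with its own size - 1 mask (cq->ibcq.cqe versus cq->resize_buf->cqe). A minimal model of that double-masked copy, with invented sizes and an invented struct cqe_model:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OLD_SIZE 4			/* both depths are powers of two */
#define NEW_SIZE 8

struct cqe_model {
	uint32_t payload;
	int sw_owned;			/* 1 = still waiting to be polled */
};

int main(void)
{
	struct cqe_model old_buf[OLD_SIZE] = {
		{ .payload = 100, .sw_owned = 1 },
		{ .payload = 101, .sw_owned = 1 },
	};
	struct cqe_model new_buf[NEW_SIZE] = { 0 };
	const uint32_t old_mask = OLD_SIZE - 1;	/* cq->ibcq.cqe        */
	const uint32_t new_mask = NEW_SIZE - 1;	/* cq->resize_buf->cqe */
	uint32_t cons_index = 0, i;

	/*
	 * Copy software-owned CQEs; each ring uses its own mask.  Like the
	 * driver's loop, this relies on hitting a hardware-owned slot to stop.
	 */
	for (i = cons_index; old_buf[i & old_mask].sw_owned; ++i)
		memcpy(&new_buf[i & new_mask], &old_buf[i & old_mask],
		       sizeof(struct cqe_model));

	for (i = 0; i < NEW_SIZE; ++i)
		printf("new slot %u: payload=%u sw=%d\n", (unsigned) i,
		       (unsigned) new_buf[i].payload, new_buf[i].sw_owned);
	return 0;
}
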
372 static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
384 cq->cqn, cq->cons_index);
479 struct mthca_cq *cq,
493 cqe = next_cqe_sw(cq);
505 cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
571 handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
648 ++cq->cons_index;
658 struct mthca_cq *cq = to_mcq(ibcq);
665 spin_lock_irqsave(&cq->lock, flags);
670 err = mthca_poll_one(dev, cq, &qp,
679 update_cons_index(dev, cq, freed);
688 if (unlikely(err == -EAGAIN && cq->resize_buf &&
689 cq->resize_buf->state == CQ_RESIZE_READY)) {
698 cq->cons_index &= cq->ibcq.cqe;
700 if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
701 cq->cons_index & cq->resize_buf->cqe))) {
705 tbuf = cq->buf;
706 tcqe = cq->ibcq.cqe;
707 cq->buf = cq->resize_buf->buf;
708 cq->ibcq.cqe = cq->resize_buf->cqe;
710 cq->resize_buf->buf = tbuf;
711 cq->resize_buf->cqe = tcqe;
712 cq->resize_buf->state = CQ_RESIZE_SWAPPED;
718 spin_unlock_irqrestore(&cq->lock, flags);
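
The mthca_poll_cq() hits (658-718) show the poll loop and the resize hand-over: when polling runs dry (-EAGAIN) while a resize sits in CQ_RESIZE_READY state, the consumer index is first folded back into the old ring's range, and if the new ring already holds a software-owned CQE at that position, cq->buf and the resize buffer are swapped under the same lock. The sketch below models only that swap decision; struct ring, struct cq_model, and maybe_swap_resize_buf() are simplified stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>

enum resize_state { CQ_RESIZE_NONE, CQ_RESIZE_READY, CQ_RESIZE_SWAPPED };

struct ring {
	int *slots;		/* 1 = software-owned CQE in that slot */
	uint32_t mask;		/* ring size - 1 */
};

struct cq_model {
	struct ring buf;
	struct ring resize_buf;
	enum resize_state state;
	uint32_t cons_index;
};

/*
 * Called when polling the current ring came back empty: if a resize is
 * ready and the new ring already holds a CQE at the re-masked consumer
 * index, switch over to it and keep the old ring around for freeing.
 */
static void maybe_swap_resize_buf(struct cq_model *cq)
{
	if (cq->state != CQ_RESIZE_READY)
		return;

	cq->cons_index &= cq->buf.mask;		/* fold back into the old ring */
	if (cq->resize_buf.slots[cq->cons_index & cq->resize_buf.mask]) {
		struct ring tmp = cq->buf;

		cq->buf = cq->resize_buf;	/* start polling the new ring */
		cq->resize_buf = tmp;
		cq->state = CQ_RESIZE_SWAPPED;
	}
}

int main(void)
{
	int old_slots[4] = { 0 };		/* old ring is fully drained  */
	int new_slots[8] = { [2] = 1 };		/* new ring has a CQE waiting */
	struct cq_model cq = {
		.buf        = { old_slots, 3 },
		.resize_buf = { new_slots, 7 },
		.state      = CQ_RESIZE_READY,
		.cons_index = 6,		/* 6 & 3 = 2 after re-masking */
	};

	maybe_swap_resize_buf(&cq);
	printf("state=%d mask=%u cons_index=%u\n",
	       cq.state, (unsigned) cq.buf.mask, (unsigned) cq.cons_index);
	return 0;
}
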
723 int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
728 to_mcq(cq)->cqn;
730 mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
731 MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));
738 struct mthca_cq *cq = to_mcq(ibcq);
741 u32 sn = cq->arm_sn & 3;
743 db_rec[0] = cpu_to_be32(cq->cons_index);
744 db_rec[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
748 mthca_write_db_rec(db_rec, cq->arm_db);
759 MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn;
761 mthca_write64(dbhi, cq->cons_index,
770 struct mthca_cq *cq)
776 cq->ibcq.cqe = nent - 1;
777 cq->is_kernel = !ctx;
779 cq->cqn = mthca_alloc(&dev->cq_table.alloc);
780 if (cq->cqn == -1)
784 err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
788 if (cq->is_kernel) {
789 cq->arm_sn = 1;
793 cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
794 cq->cqn, &cq->set_ci_db);
795 if (cq->set_ci_db_index < 0)
798 cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
799 cq->cqn, &cq->arm_db);
800 if (cq->arm_db_index < 0)
813 if (cq->is_kernel) {
814 err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
819 spin_lock_init(&cq->lock);
820 cq->refcount = 1;
821 init_waitqueue_head(&cq->wait);
822 mutex_init(&cq->mutex);
836 cq_context->lkey = cpu_to_be32(cq->buf.mr.ibmr.lkey);
837 cq_context->cqn = cpu_to_be32(cq->cqn);
840 cq_context->ci_db = cpu_to_be32(cq->set_ci_db_index);
841 cq_context->state_db = cpu_to_be32(cq->arm_db_index);
844 err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn);
851 err = mthca_array_set(&dev->cq_table.cq,
852 cq->cqn & (dev->limits.num_cqs - 1), cq);
859 cq->cons_index = 0;
866 if (cq->is_kernel)
867 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
873 if (cq->is_kernel && mthca_is_memfree(dev))
874 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
877 if (cq->is_kernel && mthca_is_memfree(dev))
878 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
881 mthca_table_put(dev, dev->cq_table.table, cq->cqn);
884 mthca_free(&dev->cq_table.alloc, cq->cqn);
889 static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
894 c = cq->refcount;
901 struct mthca_cq *cq)
912 err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn);
921 cq->cqn, cq->cons_index,
922 cq->is_kernel ? !!next_cqe_sw(cq) : 0);
928 mthca_array_clear(&dev->cq_table.cq,
929 cq->cqn & (dev->limits.num_cqs - 1));
930 --cq->refcount;
938 wait_event(cq->wait, !get_cq_refcount(dev, cq));
940 if (cq->is_kernel) {
941 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
943 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
944 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
948 mthca_table_put(dev, dev->cq_table.table, cq->cqn);
949 mthca_free(&dev->cq_table.alloc, cq->cqn);
966 err = mthca_array_init(&dev->cq_table.cq,
976 mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);