Lines matching refs: cq

97 static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry)
99 return cq->buf.buf + entry * cq->cqe_size;
102 static void *get_sw_cqe(struct mlx4_cq *cq, int n)
104 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);
105 struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe;
108 !!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;
111 static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq)
113 return get_sw_cqe(cq, cq->cons_index);
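get_sw_cqe() above decides whether the CQE at index n has been handed over to software: the CQE's ownership bit must match the parity of the wrap bit of n, which flips on every pass around the ring. A minimal sketch of that test, assuming MLX4_CQE_OWNER_MASK is the top bit (0x80) of owner_sr_opcode and that cq->ibv_cq.cqe is the ring size minus one (the helper name is illustrative):

#include <stdint.h>

/* A CQE at slot (n & cqe_mask) is ready for software when its owner bit
 * equals the wrap-parity bit n & (cqe_mask + 1).  0x80 is an assumed
 * value for MLX4_CQE_OWNER_MASK. */
static int cqe_is_sw_owned(uint8_t owner_sr_opcode, uint32_t n, uint32_t cqe_mask)
{
        int hw_owner    = !!(owner_sr_opcode & 0x80);
        int wrap_parity = !!(n & (cqe_mask + 1));

        return hw_owner == wrap_parity;
}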
200 static inline int mlx4_get_next_cqe(struct mlx4_cq *cq,
203 static inline int mlx4_get_next_cqe(struct mlx4_cq *cq,
208 cqe = next_cqe_sw(cq);
212 if (cq->cqe_size == 64)
215 ++cq->cons_index;
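The cqe_size == 64 checks above (and the tcqe/cqe adjustments around them) suggest that with a 64-byte CQE stride the parseable 32-byte struct mlx4_cqe sits in the second half of each slot. A hypothetical helper restating that adjustment, assuming sizeof(struct mlx4_cqe) is 32 bytes:

#include <stddef.h>

/* Illustrative only: advance past the unused first half of a 64-byte
 * stride so callers always parse a 32-byte CQE. */
static void *cqe_payload(void *slot, int cqe_size)
{
        return cqe_size == 64 ? (char *)slot + 32 : slot;
}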
230 static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
235 static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
252 mctx = to_mctx(cq->ibv_cq.context);
255 cq->cqe = cqe;
256 cq->flags &= (~MLX4_CQ_FLAGS_RX_CSUM_VALID);
288 pwr_id = lazy ? &cq->ibv_cq.wr_id : &wc->wr_id;
305 pstatus = lazy ? &cq->ibv_cq.status : &wc->status;
318 cq->flags |= MLX4_CQ_FLAGS_RX_CSUM_VALID;
370 static inline int mlx4_parse_lazy_cqe(struct mlx4_cq *cq,
373 static inline int mlx4_parse_lazy_cqe(struct mlx4_cq *cq,
376 return mlx4_parse_cqe(cq, cqe, &cq->cur_qp, NULL, 1);
379 static inline int mlx4_poll_one(struct mlx4_cq *cq,
383 static inline int mlx4_poll_one(struct mlx4_cq *cq,
390 err = mlx4_get_next_cqe(cq, &cqe);
394 return mlx4_parse_cqe(cq, cqe, cur_qp, wc, 0);
399 struct mlx4_cq *cq = to_mcq(ibcq);
404 pthread_spin_lock(&cq->lock);
407 err = mlx4_poll_one(cq, &qp, wc + npolled);
413 mlx4_update_cons_index(cq);
415 pthread_spin_unlock(&cq->lock);
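mlx4_poll_cq() above is the legacy polling entry point that ibv_poll_cq() dispatches to: take the CQ lock, run mlx4_poll_one() up to ne times, publish the new consumer index, and unlock. A minimal consumer-side usage sketch (drain_cq and the batch size of 16 are illustrative, not names from the file):

#include <infiniband/verbs.h>

/* Poll the CQ in batches until it is empty or a completion fails. */
static int drain_cq(struct ibv_cq *cq)
{
        struct ibv_wc wc[16];
        int n, i;

        while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IBV_WC_SUCCESS)
                                return -1;      /* completion with error */
                }
        }
        return n;       /* 0 when the CQ is empty, negative on poll failure */
}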
424 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
426 mlx4_update_cons_index(cq);
429 pthread_spin_unlock(&cq->lock);
440 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
448 pthread_spin_lock(&cq->lock);
450 cq->cur_qp = NULL;
452 err = mlx4_get_next_cqe(cq, &cqe);
455 pthread_spin_unlock(&cq->lock);
459 err = mlx4_parse_lazy_cqe(cq, cqe);
461 pthread_spin_unlock(&cq->lock);
468 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
472 err = mlx4_get_next_cqe(cq, &cqe);
476 return mlx4_parse_lazy_cqe(cq, cqe);
503 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
505 if (cq->cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK) {
506 switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
526 switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
541 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
543 return be32toh(cq->cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK;
548 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
549 int is_send = cq->cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
553 switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
560 if (cq->flags & MLX4_CQ_FLAGS_RX_CSUM_VALID)
561 wc_flags |= ((cq->cqe->status &
566 switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
575 wc_flags |= (be32toh(cq->cqe->g_mlpath_rqpn) & 0x80000000) ? IBV_WC_GRH : 0;
583 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
585 return be32toh(cq->cqe->byte_cnt);
590 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
591 struct mlx4_err_cqe *ecqe = (struct mlx4_err_cqe *)cq->cqe;
598 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
600 switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
602 return be32toh(cq->cqe->immed_rss_invalid);
604 return cq->cqe->immed_rss_invalid;
610 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
612 return (uint32_t)be16toh(cq->cqe->rlid);
617 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
619 if ((cq->cur_qp) && (cq->cur_qp->link_layer == IBV_LINK_LAYER_ETHERNET))
620 return be16toh(cq->cqe->sl_vid) >> 13;
622 return be16toh(cq->cqe->sl_vid) >> 12;
627 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
629 return be32toh(cq->cqe->g_mlpath_rqpn) & 0xffffff;
634 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
636 return (be32toh(cq->cqe->g_mlpath_rqpn) >> 24) & 0x7f;
641 struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
643 return ((uint64_t)be32toh(cq->cqe->ts_47_16) << 16) |
644 (cq->cqe->ts_15_8 << 8) |
645 (cq->cqe->ts_7_0);
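The read_completion_ts callback above stitches a 48-bit timestamp out of three CQE fields. A hedged restatement (the helper name is illustrative): only ts_47_16 needs a byte swap, because the two low fields are single bytes.

#include <endian.h>
#include <stdint.h>

/* Assemble bits 47..16 (big-endian 32-bit field) with the two low bytes. */
static uint64_t cqe_timestamp(uint32_t ts_47_16_be, uint8_t ts_15_8, uint8_t ts_7_0)
{
        return ((uint64_t)be32toh(ts_47_16_be) << 16) |
               ((uint64_t)ts_15_8 << 8) |
                (uint64_t)ts_7_0;
}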
648 void mlx4_cq_fill_pfns(struct mlx4_cq *cq, const struct ibv_cq_init_attr_ex *cq_attr)
651 if (cq->flags & MLX4_CQ_FLAGS_SINGLE_THREADED) {
652 cq->ibv_cq.start_poll = mlx4_start_poll;
653 cq->ibv_cq.end_poll = mlx4_end_poll;
655 cq->ibv_cq.start_poll = mlx4_start_poll_lock;
656 cq->ibv_cq.end_poll = mlx4_end_poll_lock;
658 cq->ibv_cq.next_poll = mlx4_next_poll;
660 cq->ibv_cq.read_opcode = mlx4_cq_read_wc_opcode;
661 cq->ibv_cq.read_vendor_err = mlx4_cq_read_wc_vendor_err;
662 cq->ibv_cq.read_wc_flags = mlx4_cq_read_wc_flags;
664 cq->ibv_cq.read_byte_len = mlx4_cq_read_wc_byte_len;
666 cq->ibv_cq.read_imm_data = mlx4_cq_read_wc_imm_data;
668 cq->ibv_cq.read_qp_num = mlx4_cq_read_wc_qp_num;
670 cq->ibv_cq.read_src_qp = mlx4_cq_read_wc_src_qp;
672 cq->ibv_cq.read_slid = mlx4_cq_read_wc_slid;
674 cq->ibv_cq.read_sl = mlx4_cq_read_wc_sl;
676 cq->ibv_cq.read_dlid_path_bits = mlx4_cq_read_wc_dlid_path_bits;
678 cq->ibv_cq.read_completion_ts = mlx4_cq_read_wc_completion_ts;
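mlx4_cq_fill_pfns() above wires the callbacks that libibverbs exposes as ibv_start_poll()/ibv_next_poll()/ibv_end_poll() and the ibv_wc_read_*() accessors. A hedged usage sketch from the application side (drain_cq_ex is an illustrative name; cq_ex is assumed to come from ibv_create_cq_ex()):

#include <errno.h>
#include <stdint.h>
#include <infiniband/verbs.h>

/* Iterate the CQ with the extended API, summing received bytes. */
static int drain_cq_ex(struct ibv_cq_ex *cq_ex, uint64_t *recv_bytes)
{
        struct ibv_poll_cq_attr attr = { .comp_mask = 0 };
        int ret;

        *recv_bytes = 0;

        ret = ibv_start_poll(cq_ex, &attr);
        if (ret)
                return ret == ENOENT ? 0 : ret;  /* ENOENT: nothing to poll */

        do {
                if (cq_ex->status != IBV_WC_SUCCESS)
                        break;                   /* wr_id/status live in cq_ex itself */
                if (ibv_wc_read_opcode(cq_ex) == IBV_WC_RECV)
                        *recv_bytes += ibv_wc_read_byte_len(cq_ex);
        } while (!ibv_next_poll(cq_ex));         /* non-zero (ENOENT) once drained */

        ibv_end_poll(cq_ex);
        return 0;
}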
683 struct mlx4_cq *cq = to_mcq(ibvcq);
689 sn = cq->arm_sn & 3;
690 ci = cq->cons_index & 0xffffff;
693 *cq->arm_db = htobe32(sn << 28 | cmd | ci);
701 doorbell[0] = htobe32(sn << 28 | cmd | cq->cqn);
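The arm path above composes a 32-bit big-endian word from the 2-bit arm sequence number (bits 29:28), a notification command, and a 24-bit value: the consumer index for the doorbell record, or the CQ number for the MMIO doorbell. A hedged restatement of that packing (cq_arm_word is an illustrative name, not the driver's helper):

#include <endian.h>
#include <stdint.h>

/* Returns the word already byte-swapped to big-endian, as above. */
static uint32_t cq_arm_word(uint32_t sn, uint32_t cmd, uint32_t idx_or_cqn)
{
        return htobe32(((sn & 3) << 28) | cmd | (idx_or_cqn & 0xffffff));
}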
709 void mlx4_cq_event(struct ibv_cq *cq)
711 to_mcq(cq)->arm_sn++;
714 void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
720 int cqe_inc = cq->cqe_size == 64 ? 1 : 0;
729 for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
730 if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
737 while ((int) --prod_index - (int) cq->cons_index >= 0) {
738 cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
750 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
760 cq->cons_index += nfreed;
766 mlx4_update_cons_index(cq);
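__mlx4_cq_clean() above finds how far hardware has produced, then walks backward to the consumer index, discarding completions that belong to the destroyed QP and sliding the survivors up so they remain contiguous below the producer index. A generic, hedged sketch of that compaction (names and the drop() predicate are illustrative; the real code also preserves the destination slot's ownership bit and releases SRQ WQEs, which this sketch omits):

#include <stdint.h>
#include <string.h>

/* Walk newest-to-oldest; count dropped entries and move survivors up. */
static uint32_t compact_ring(uint8_t *ring, size_t entry_size, uint32_t mask,
                             uint32_t cons, uint32_t prod,
                             int (*drop)(const void *entry))
{
        uint32_t nfreed = 0;

        while ((int)--prod - (int)cons >= 0) {
                uint8_t *e = ring + (prod & mask) * entry_size;

                if (drop(e))
                        ++nfreed;
                else if (nfreed)
                        memcpy(ring + ((prod + nfreed) & mask) * entry_size,
                               e, entry_size);
        }
        return nfreed;  /* caller advances its consumer index by this much */
}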
770 void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
772 pthread_spin_lock(&cq->lock);
773 __mlx4_cq_clean(cq, qpn, srq);
774 pthread_spin_unlock(&cq->lock);
777 int mlx4_get_outstanding_cqes(struct mlx4_cq *cq)
781 for (i = cq->cons_index; get_sw_cqe(cq, i); ++i)
784 return i - cq->cons_index;
787 void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)
791 int cqe_inc = cq->cqe_size == 64 ? 1 : 0;
793 i = cq->cons_index;
794 cqe = get_cqe(cq, (i & old_cqe));
799 (((i + 1) & (cq->ibv_cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
800 memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * cq->cqe_size,
801 cqe - cqe_inc, cq->cqe_size);
803 cqe = get_cqe(cq, (i & old_cqe));
807 ++cq->cons_index;
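mlx4_cq_resize_copy_cqes() above copies the not-yet-polled CQEs from the old buffer into the resized one, rewriting each ownership bit to the wrap parity the new ring expects at the destination slot, so the get_sw_cqe() test keeps recognizing the entries after the resize. A sketch of just that rewrite (the helper name and the 0x80 owner-bit value are assumptions):

#include <stdint.h>

/* Set the owner bit according to the destination index's wrap parity. */
static uint8_t rewrite_owner_bit(uint8_t owner_sr_opcode, uint32_t dest_index,
                                 uint32_t new_cqe_mask)
{
        uint8_t owner = (dest_index & (new_cqe_mask + 1)) ? 0x80 : 0;

        return (owner_sr_opcode & 0x7f) | owner;
}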