Lines matching refs:mcq

41 static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, int type)
43 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
50 type, mcq->cqn);
69 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
82 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
94 return get_sw_cqe(cq, cq->mcq.cons_index);
535 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
537 ++cq->mcq.cons_index;
594 "Requestor" : "Responder", cq->mcq.cqn);
630 cq->mcq.cqn, mr->sig->err_item.key,
655 cq->mcq.cqn);
690 mlx5_cq_set_ci(&cq->mcq);
716 mlx5_cq_arm(&cq->mcq,
721 cq->mcq.cons_index);
849 cq->mcq.set_ci_db = cq->db.db;
850 cq->mcq.arm_db = cq->db.db + 1;
851 cq->mcq.cqe_sz = cqe_size;
973 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
977 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
978 cq->mcq.irqn = irqn;
979 cq->mcq.comp = mlx5_ib_cq_comp;
980 cq->mcq.event = mlx5_ib_cq_event;
985 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
995 mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
1014 struct mlx5_ib_cq *mcq = to_mcq(cq);
1020 mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
1022 destroy_cq_user(mcq, context);
1024 destroy_cq_kernel(dev, mcq);
1026 kfree(mcq);
1053 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
1054 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1060 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
1062 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1069 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
1071 memcpy(dest, cqe, cq->mcq.cqe_sz);
1078 cq->mcq.cons_index += nfreed;
1083 mlx5_cq_set_ci(&cq->mcq);
1100 struct mlx5_ib_cq *mcq = to_mcq(cq);
1106 err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
1109 mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
1205 i = cq->mcq.cons_index;
1233 cq->mcq.cqn);
1237 ++cq->mcq.cons_index;
1321 MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
1323 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
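The field behind all of these references is the low-level struct mlx5_core_cq instance named mcq that is embedded inside the driver's struct mlx5_ib_cq. Core-layer callbacks such as the event handler at lines 41/43 only receive a pointer to that embedded member and recover the enclosing structure with container_of(). Below is a minimal, self-contained sketch of that embedding pattern; the names core_cq, ib_cq_wrapper and cq_event are simplified stand-ins for illustration, not the real mlx5 definitions.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct mlx5_core_cq: only the fields the sketch needs. */
struct core_cq {
    unsigned int cqn;          /* CQ number assigned by the device */
};

/* Stand-in for struct mlx5_ib_cq: wraps the low-level CQ. */
struct ib_cq_wrapper {
    int cqe;                   /* CQE count visible to the consumer */
    struct core_cq mcq;        /* embedded low-level CQ */
};

/* Recover the wrapper from a pointer to its embedded member,
 * the same way mlx5_ib_cq_event() uses container_of(). */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void cq_event(struct core_cq *mcq, int type)
{
    struct ib_cq_wrapper *cq = container_of(mcq, struct ib_cq_wrapper, mcq);

    printf("event %d on cqn 0x%x, user-visible cqe count %d\n",
           type, mcq->cqn, cq->cqe);
}

int main(void)
{
    struct ib_cq_wrapper cq = { .cqe = 256, .mcq = { .cqn = 0x1a } };

    /* The core layer only ever sees &cq.mcq, yet the callback can
     * still reach the surrounding wrapper structure. */
    cq_event(&cq.mcq, 1);
    return 0;
}

This is why the listing alternates between passing &cq->mcq down to mlx5_core_create_cq()/mlx5_core_destroy_cq() and reading fields such as cq->mcq.cqn, cq->mcq.cqe_sz and cq->mcq.cons_index back out in the IB-layer code.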
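Lines 1053-1083 show the CQ-clean walk: scan forward from mcq.cons_index to find the last software-owned CQE, walk backwards counting CQEs that belong to the QP being cleaned, slide the surviving CQEs up over the freed slots, then advance cons_index by the number freed and ring the doorbell with mlx5_cq_set_ci(). The following is a simplified, self-contained analogue of that compaction under stated assumptions: stand-in structs, a producer count instead of ownership bits, and no doorbell write.

#include <stdio.h>

/* Illustrative CQE: just a QP number and a payload word. */
struct cqe {
    unsigned int qpn;
    unsigned int data;
};

#define CQ_SIZE 8   /* ring size; must be a power of two for the masking */

struct cq {
    struct cqe buf[CQ_SIZE];
    unsigned int cons_index;   /* next entry software will consume */
    unsigned int prod_entries; /* outstanding entries (stand-in for the
                                * sw-ownership scan at lines 1053-1054) */
};

static struct cqe *get_cqe(struct cq *cq, unsigned int n)
{
    return &cq->buf[n & (CQ_SIZE - 1)];
}

/* Drop all not-yet-consumed CQEs belonging to @qpn, compacting the
 * survivors toward the producer side and advancing cons_index by the
 * number freed, mirroring the backward walk at lines 1060-1078. */
static void cq_clean(struct cq *cq, unsigned int qpn)
{
    unsigned int prod_index = cq->cons_index + cq->prod_entries;
    unsigned int nfreed = 0;

    while ((int)--prod_index - (int)cq->cons_index >= 0) {
        struct cqe *cqe = get_cqe(cq, prod_index);

        if (cqe->qpn == qpn) {
            nfreed++;
        } else if (nfreed) {
            /* slide the surviving entry up over the freed slots */
            *get_cqe(cq, prod_index + nfreed) = *cqe;
        }
    }

    cq->cons_index += nfreed;
    cq->prod_entries -= nfreed;
    /* the real driver now updates the doorbell via mlx5_cq_set_ci() */
}

int main(void)
{
    struct cq cq = { .cons_index = 0, .prod_entries = 4 };
    unsigned int i;

    for (i = 0; i < 4; i++)
        cq.buf[i] = (struct cqe){ .qpn = (i & 1) ? 7 : 5, .data = i };

    cq_clean(&cq, 5);   /* drop the CQEs queued for QP 5 */

    for (i = 0; i < cq.prod_entries; i++)
        printf("slot %u: qpn %u data %u\n",
               cq.cons_index + i,
               get_cqe(&cq, cq.cons_index + i)->qpn,
               get_cqe(&cq, cq.cons_index + i)->data);
    return 0;
}

After the clean, only the CQEs for other QPs remain, sitting just below the producer position, and the consumer index has moved past the freed slots, which is exactly the state the driver advertises to hardware when it rings the consumer-index doorbell at line 1083.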