Lines Matching refs:cq

36 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe __unused)
38 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
45 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
46 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
47 struct ib_cq *ibcq = &cq->ibcq;
59 event.element.cq = ibcq;
69 static void *get_cqe(struct mlx5_ib_cq *cq, int n)
71 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
79 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
94 static void *next_cqe_sw(struct mlx5_ib_cq *cq)
96 return get_sw_cqe(cq, cq->mcq.cons_index);
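
The get_sw_cqe()/next_cqe_sw() lines above encode the ring's ownership convention: a CQE at index n belongs to software when its owner bit equals the wrap parity of the consumer index (the real check also skips CQEs whose opcode is MLX5_CQE_INVALID). A minimal userspace sketch of just that test, with hypothetical demo_* names standing in for the driver's types and MLX5_CQE_OWNER_MASK:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_OWNER_MASK 0x1

struct demo_cqe {
	uint8_t op_own;   /* opcode in the high nibble, owner bit in bit 0 */
};

static bool demo_cqe_is_sw_owned(const struct demo_cqe *cqe,
				 uint32_t n, uint32_t nent)
{
	/* nent is a power of two, so (n & nent) flips each time n wraps the ring */
	uint8_t wrap_parity = !!(n & nent);

	/* software owns the entry when the owner bit matches the wrap parity */
	return (cqe->op_own & DEMO_OWNER_MASK) == wrap_parity;
}
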
494 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
500 /* Find uncompleted WQEs belonging to that cq and return mimicked (software flush-in-error) completions for them */
501 list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
507 list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
514 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
518 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
534 cqe = next_cqe_sw(cq);
538 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
540 ++cq->mcq.cons_index;
549 if (likely(cq->resize_buf)) {
550 free_cq_buf(dev, &cq->buf);
551 cq->buf = *cq->resize_buf;
552 kfree(cq->resize_buf);
553 cq->resize_buf = NULL;
597 "Requestor" : "Responder", cq->mcq.cqn);
633 cq->mcq.cqn, mr->sig->err_item.key,
646 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
649 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
653 list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
658 cq->mcq.cqn);
670 struct mlx5_ib_cq *cq = to_mcq(ibcq);
672 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
678 spin_lock_irqsave(&cq->lock, flags);
680 mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
684 if (unlikely(!list_empty(&cq->wc_list)))
685 soft_polled = poll_soft_wc(cq, num_entries, wc);
688 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
693 mlx5_cq_set_ci(&cq->mcq);
695 spin_unlock_irqrestore(&cq->lock, flags);
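
The mlx5_ib_poll_cq() lines above follow a common shape: take cq->lock with interrupts disabled, drain at most num_entries completions, then publish the new consumer index to hardware once via mlx5_cq_set_ci(). A shape-only sketch under those assumptions, with hypothetical demo_* stubs in place of the driver's types, locking, and doorbell write:

struct demo_wc { int status; };

struct demo_cq {
	unsigned int cons_index;
	unsigned int pending;   /* CQEs produced by hardware but not yet polled */
};

/* stub: consume one completion if any is pending; 0 on success, -1 if empty */
static int demo_poll_one(struct demo_cq *cq, struct demo_wc *wc)
{
	if (!cq->pending)
		return -1;
	cq->pending--;
	cq->cons_index++;
	wc->status = 0;
	return 0;
}

/* stub for the consumer-index doorbell update done by mlx5_cq_set_ci() */
static void demo_cq_set_ci(struct demo_cq *cq)
{
	(void)cq;
}

static int demo_poll_cq(struct demo_cq *cq, int num_entries, struct demo_wc *wc)
{
	int npolled;

	for (npolled = 0; npolled < num_entries; npolled++)
		if (demo_poll_one(cq, wc + npolled))
			break;

	demo_cq_set_ci(cq);   /* one doorbell update for the whole batch */
	return npolled;
}
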
703 struct mlx5_ib_cq *cq = to_mcq(ibcq);
711 spin_lock_irqsave(&cq->lock, irq_flags);
712 if (cq->notify_flags != IB_CQ_NEXT_COMP)
713 cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
715 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
717 spin_unlock_irqrestore(&cq->lock, irq_flags);
719 mlx5_cq_arm(&cq->mcq,
724 cq->mcq.cons_index);
746 struct mlx5_ib_cq *cq, int entries, u32 **cqb,
775 cq->buf.umem = ib_umem_get(&context->ibucontext, ucmd.buf_addr,
778 if (IS_ERR(cq->buf.umem)) {
779 err = PTR_ERR(cq->buf.umem);
784 &cq->db);
788 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
802 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
824 mlx5_ib_db_unmap_user(context, &cq->db);
827 ib_umem_release(cq->buf.umem);
831 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
836 mlx5_ib_db_unmap_user(context, &cq->db);
837 ib_umem_release(cq->buf.umem);
840 static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
853 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
861 err = mlx5_db_alloc(dev->mdev, &cq->db);
865 cq->mcq.set_ci_db = cq->db.db;
866 cq->mcq.arm_db = cq->db.db + 1;
867 cq->mcq.cqe_sz = cqe_size;
869 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
873 init_cq_buf(cq, &cq->buf);
876 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
884 mlx5_fill_page_array(&cq->buf.buf, pas);
888 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
895 free_cq_buf(dev, &cq->buf);
898 mlx5_db_free(dev->mdev, &cq->db);
902 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
904 free_cq_buf(dev, &cq->buf);
905 mlx5_db_free(dev->mdev, &cq->db);
910 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
913 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
924 struct mlx5_ib_cq *cq = to_mcq(ibcq);
945 cq->ibcq.cqe = entries - 1;
946 mutex_init(&cq->resize_mutex);
947 spin_lock_init(&cq->lock);
948 cq->resize_buf = NULL;
949 cq->resize_umem = NULL;
950 cq->create_flags = attr->flags;
951 INIT_LIST_HEAD(&cq->list_send_qp);
952 INIT_LIST_HEAD(&cq->list_recv_qp);
955 err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
961 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
966 INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
973 cq->cqe_size = cqe_size;
980 MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
981 if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
984 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
988 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
989 cq->mcq.irqn = irqn;
990 cq->mcq.comp = mlx5_ib_cq_comp;
991 cq->mcq.event = mlx5_ib_cq_event;
993 INIT_LIST_HEAD(&cq->wc_list);
996 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
1006 mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
1011 destroy_cq_user(cq, udata);
1013 destroy_cq_kernel(dev, cq);
1017 void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
1019 struct mlx5_ib_dev *dev = to_mdev(cq->device);
1020 struct mlx5_ib_cq *mcq = to_mcq(cq);
1034 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
1042 if (!cq)
1051 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
1052 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1058 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
1059 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
1060 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1066 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
1067 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
1069 memcpy(dest, cqe, cq->mcq.cqe_sz);
1076 cq->mcq.cons_index += nfreed;
1081 mlx5_cq_set_ci(&cq->mcq);
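
The __mlx5_ib_cq_clean() lines above walk back from the producer index, discard completions that belong to the QP (or SRQ resource) being torn down, slide the surviving CQEs toward the producer end, and finally advance the consumer index past the freed slots. An array-based sketch of that compaction, using hypothetical demo_* names; the real code additionally preserves each destination slot's hardware ownership bit and releases matching SRQ WQEs:

#include <stdint.h>
#include <string.h>

struct demo_cqe { uint32_t qpn; };

struct demo_cq {
	struct demo_cqe *ring;
	uint32_t mask;          /* nent - 1, with nent a power of two */
	uint32_t cons_index;
};

static void demo_cq_clean(struct demo_cq *cq, uint32_t prod_index, uint32_t rsn)
{
	uint32_t nfreed = 0;

	while ((int)--prod_index - (int)cq->cons_index >= 0) {
		struct demo_cqe *cqe = &cq->ring[prod_index & cq->mask];

		if (cqe->qpn == rsn) {
			nfreed++;                        /* drop this completion */
		} else if (nfreed) {
			struct demo_cqe *dest =
				&cq->ring[(prod_index + nfreed) & cq->mask];

			memcpy(dest, cqe, sizeof(*cqe)); /* slide survivor up */
		}
	}

	cq->cons_index += nfreed;    /* the next poll skips the freed slots */
}
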
1085 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
1087 if (!cq)
1090 spin_lock_irq(&cq->lock);
1091 __mlx5_ib_cq_clean(cq, qpn, srq);
1092 spin_unlock_irq(&cq->lock);
1095 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1097 struct mlx5_ib_dev *dev = to_mdev(cq->device);
1098 struct mlx5_ib_cq *mcq = to_mcq(cq);
1107 mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
1112 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1120 struct ib_ucontext *context = cq->buf.umem->context;
1144 cq->resize_umem = umem;
1150 static void un_resize_user(struct mlx5_ib_cq *cq)
1152 ib_umem_release(cq->resize_umem);
1155 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1160 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
1161 if (!cq->resize_buf)
1164 err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
1168 init_cq_buf(cq, cq->resize_buf);
1173 kfree(cq->resize_buf);
1177 static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
1179 free_cq_buf(dev, cq->resize_buf);
1180 cq->resize_buf = NULL;
1183 static int copy_resize_cqes(struct mlx5_ib_cq *cq)
1185 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
1196 ssize = cq->buf.cqe_size;
1197 dsize = cq->resize_buf->cqe_size;
1203 i = cq->mcq.cons_index;
1204 scqe = get_sw_cqe(cq, i);
1213 dcqe = get_cqe_from_buf(cq->resize_buf,
1214 (i + 1) & (cq->resize_buf->nent),
1217 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
1222 scqe = get_sw_cqe(cq, i);
1231 cq->mcq.cqn);
1235 ++cq->mcq.cons_index;
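
The copy_resize_cqes() lines above migrate software-owned CQEs from the old buffer into the resized one, recomputing each copy's ownership bit for the new ring size (the loop in the driver terminates on a resize-marker CQE, elided here). A sketch of one copy step under those assumptions, with hypothetical demo_* names:

#include <stdint.h>
#include <string.h>

#define DEMO_OWNER_MASK 0x1

struct demo_cqe64 { uint8_t op_own; uint8_t payload[63]; };

/* mirrors the driver's sw_ownership_bit(): parity of the wrap count */
static uint8_t demo_sw_ownership_bit(uint32_t n, uint32_t nent)
{
	return (n & nent) ? DEMO_OWNER_MASK : 0;
}

static void demo_copy_resized_cqe(struct demo_cqe64 *dst_ring, uint32_t dst_nent,
				  const struct demo_cqe64 *scqe, uint32_t i)
{
	struct demo_cqe64 *dcqe = &dst_ring[(i + 1) & (dst_nent - 1)];

	memcpy(dcqe, scqe, sizeof(*dcqe));
	/* keep the payload, but recompute the owner bit for the new ring */
	dcqe->op_own = (scqe->op_own & ~DEMO_OWNER_MASK) |
		       demo_sw_ownership_bit(i + 1, dst_nent);
}
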
1242 struct mlx5_ib_cq *cq = to_mcq(ibcq);
1273 mutex_lock(&cq->resize_mutex);
1275 err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
1279 err = resize_kernel(dev, cq, entries, cqe_size);
1281 npas = cq->resize_buf->buf.npages;
1282 page_shift = cq->resize_buf->buf.page_shift;
1300 mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
1303 mlx5_fill_page_array(&cq->resize_buf->buf, pas);
1319 MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
1321 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
1326 cq->ibcq.cqe = entries - 1;
1327 ib_umem_release(cq->buf.umem);
1328 cq->buf.umem = cq->resize_umem;
1329 cq->resize_umem = NULL;
1334 spin_lock_irqsave(&cq->lock, flags);
1335 if (cq->resize_buf) {
1336 err = copy_resize_cqes(cq);
1338 tbuf = cq->buf;
1339 cq->buf = *cq->resize_buf;
1340 kfree(cq->resize_buf);
1341 cq->resize_buf = NULL;
1345 cq->ibcq.cqe = entries - 1;
1346 spin_unlock_irqrestore(&cq->lock, flags);
1350 mutex_unlock(&cq->resize_mutex);
1360 un_resize_user(cq);
1362 un_resize_kernel(dev, cq);
1364 mutex_unlock(&cq->resize_mutex);
1370 struct mlx5_ib_cq *cq;
1375 cq = to_mcq(ibcq);
1376 return cq->cqe_size;
1383 struct mlx5_ib_cq *cq = to_mcq(ibcq);
1391 spin_lock_irqsave(&cq->lock, flags);
1392 list_add_tail(&soft_wc->list, &cq->wc_list);
1393 if (cq->notify_flags == IB_CQ_NEXT_COMP ||
1395 cq->notify_flags = 0;
1396 schedule_work(&cq->notify_work);
1398 spin_unlock_irqrestore(&cq->lock, flags);
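
The final group of lines shows the software-completion path: a synthetic work completion is appended to cq->wc_list under cq->lock, and the deferred notifier (cq->notify_work, which invokes ibcq->comp_handler) is scheduled when the consumer armed the CQ for the next completion or when the completion carries an error. A lock-free, list-based sketch of that decision, with hypothetical demo_* names standing in for the driver's list and workqueue use:

struct demo_soft_wc {
	struct demo_soft_wc *next;
	int status;                    /* 0 == success */
};

struct demo_cq {
	struct demo_soft_wc *wc_head;  /* stands in for cq->wc_list */
	struct demo_soft_wc *wc_tail;
	unsigned int notify_flags;     /* last arm request, 0 when unarmed */
};

#define DEMO_CQ_NEXT_COMP 1U

/* stand-in for schedule_work(&cq->notify_work) */
static void demo_schedule_notify(struct demo_cq *cq) { (void)cq; }

static void demo_generate_soft_wc(struct demo_cq *cq, struct demo_soft_wc *wc)
{
	/* queue the synthetic completion so the next poll returns it */
	wc->next = NULL;
	if (cq->wc_tail)
		cq->wc_tail->next = wc;
	else
		cq->wc_head = wc;
	cq->wc_tail = wc;

	/* kick the deferred handler if the CQ is armed for the next
	 * completion, or unconditionally for an error completion */
	if (cq->notify_flags == DEMO_CQ_NEXT_COMP || wc->status != 0) {
		cq->notify_flags = 0;
		demo_schedule_notify(cq);
	}
}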