Lines Matching refs:cq

41 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
43 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
50 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
51 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
52 struct ib_cq *ibcq = &cq->ibcq;
64 event.element.cq = ibcq;
69 static void *get_cqe(struct mlx5_ib_cq *cq, int n)
71 return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
79 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
94 static void *next_cqe_sw(struct mlx5_ib_cq *cq)
96 return get_sw_cqe(cq, cq->mcq.cons_index);
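
The get_sw_cqe() condition above treats a CQE as software-owned when its ownership bit matches the "lap" parity of the index, i.e. the bit just above the index mask. A minimal standalone sketch of that test; cqe_is_sw_owned and OWNER_MASK are hypothetical stand-ins (OWNER_MASK plays the role of MLX5_CQE_OWNER_MASK), not the driver's names:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper mirroring the get_sw_cqe() ownership test: a CQE at
 * index n is software-owned when its owner bit equals the lap parity of n.
 * cqe_mask is ring_size - 1 and the ring size is a power of two.
 */
#define OWNER_MASK 0x1

bool cqe_is_sw_owned(uint8_t op_own, unsigned int n, unsigned int cqe_mask)
{
	unsigned int lap_parity = !!(n & (cqe_mask + 1));

	return !((op_own & OWNER_MASK) ^ lap_parity);
}
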
427 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
433 /* Find uncompleted WQEs belonging to that cq and return mimicked (flushed) completions for them */
434 list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
440 list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
447 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
451 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
463 cqe = next_cqe_sw(cq);
467 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
469 ++cq->mcq.cons_index;
478 if (likely(cq->resize_buf)) {
479 free_cq_buf(dev, &cq->buf);
480 cq->buf = *cq->resize_buf;
481 kfree(cq->resize_buf);
482 cq->resize_buf = NULL;
526 "Requestor" : "Responder", cq->mcq.cqn);
567 cq->mcq.cqn, sig->err_item.key,
581 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
584 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
588 list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
593 cq->mcq.cqn);
609 struct mlx5_ib_cq *cq = to_mcq(ibcq);
611 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
617 spin_lock_irqsave(&cq->lock, flags);
620 if (unlikely(!list_empty(&cq->wc_list)))
621 soft_polled = poll_soft_wc(cq, num_entries, wc, true);
623 mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
628 if (unlikely(!list_empty(&cq->wc_list)))
629 soft_polled = poll_soft_wc(cq, num_entries, wc, false);
632 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
637 mlx5_cq_set_ci(&cq->mcq);
639 spin_unlock_irqrestore(&cq->lock, flags);
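
The mlx5_ib_poll_cq() fragments above show the overall polling order: software-generated completions on wc_list are drained first, hardware CQEs fill the remaining budget, and the consumer index is published once at the end. A hedged, self-contained sketch of that control flow only; every name below (sketch_cq, poll_soft, poll_one_hw, sketch_poll_cq) is hypothetical and does not reflect the driver's API:

/* Skeleton of the polling order; the driver does all of this under
 * cq->lock and fills in real work completions along the way. */
struct sketch_cq {
	int soft_pending;	/* entries queued on cq->wc_list */
	int hw_pending;		/* valid hardware CQEs in the ring */
	unsigned int ci;	/* consumer index */
};

static int poll_soft(struct sketch_cq *cq, int budget)
{
	int n = 0;

	while (n < budget && cq->soft_pending) {
		cq->soft_pending--;
		n++;
	}
	return n;
}

static int poll_one_hw(struct sketch_cq *cq)
{
	if (!cq->hw_pending)
		return -1;		/* ring empty, stop polling */
	cq->hw_pending--;
	cq->ci++;			/* ++cq->mcq.cons_index in the driver */
	return 0;
}

int sketch_poll_cq(struct sketch_cq *cq, int num_entries)
{
	int soft = poll_soft(cq, num_entries);
	int npolled;

	for (npolled = 0; npolled < num_entries - soft; npolled++)
		if (poll_one_hw(cq))
			break;

	/* one consumer-index update at the end, like mlx5_cq_set_ci() */
	return soft + npolled;
}
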
647 struct mlx5_ib_cq *cq = to_mcq(ibcq);
652 spin_lock_irqsave(&cq->lock, irq_flags);
653 if (cq->notify_flags != IB_CQ_NEXT_COMP)
654 cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
656 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
658 spin_unlock_irqrestore(&cq->lock, irq_flags);
660 mlx5_cq_arm(&cq->mcq,
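
The arm path above latches the notification mode so that a later request for solicited-only events never downgrades a CQ already armed for the next completion, and it reports missed events when soft completions are still queued. A small sketch of just the latching step; the SK_* constants are hypothetical stand-ins for the IB_CQ_* flags:

/* Only the notify-flag latching from mlx5_ib_arm_cq() is shown. */
enum { SK_SOLICITED = 1, SK_NEXT_COMP = 2, SK_SOLICITED_MASK = 3 };

void sketch_arm_cq(unsigned int *notify_flags, unsigned int flags)
{
	if (*notify_flags != SK_NEXT_COMP)
		*notify_flags = flags & SK_SOLICITED_MASK;
	/* the driver then rings the arm doorbell via mlx5_cq_arm() */
}
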
716 struct mlx5_ib_cq *cq, int entries, u32 **cqb,
748 cq->buf.umem =
751 if (IS_ERR(cq->buf.umem)) {
752 err = PTR_ERR(cq->buf.umem);
757 cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
764 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
768 ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
773 ib_umem_num_pages(cq->buf.umem), page_size, ncont);
784 mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);
837 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
841 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;
850 mlx5_ib_db_unmap_user(context, &cq->db);
853 ib_umem_release(cq->buf.umem);
857 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
862 mlx5_ib_db_unmap_user(context, &cq->db);
863 ib_umem_release(cq->buf.umem);
879 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
887 err = mlx5_db_alloc(dev->mdev, &cq->db);
891 cq->mcq.set_ci_db = cq->db.db;
892 cq->mcq.arm_db = cq->db.db + 1;
893 cq->mcq.cqe_sz = cqe_size;
895 err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
899 init_cq_frag_buf(&cq->buf);
903 cq->buf.frag_buf.npages;
911 mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
915 cq->buf.frag_buf.page_shift -
923 free_cq_buf(dev, &cq->buf);
926 mlx5_db_free(dev->mdev, &cq->db);
930 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
932 free_cq_buf(dev, &cq->buf);
933 mlx5_db_free(dev->mdev, &cq->db);
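
In create_cq_kernel() above, a single doorbell record supplies two adjacent words: the consumer-index doorbell and the arm doorbell. A layout sketch with a hypothetical sketch_cq_db type (the real record is big-endian and DMA-mapped, which this ignores):

#include <stdint.h>

struct sketch_cq_db {
	uint32_t words[2];
};

void sketch_attach_db(struct sketch_cq_db *db,
		      uint32_t **set_ci_db, uint32_t **arm_db)
{
	*set_ci_db = &db->words[0];	/* cq->mcq.set_ci_db = cq->db.db     */
	*arm_db    = &db->words[1];	/* cq->mcq.arm_db    = cq->db.db + 1 */
}
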
938 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
941 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
951 struct mlx5_ib_cq *cq = to_mcq(ibcq);
972 cq->ibcq.cqe = entries - 1;
973 mutex_init(&cq->resize_mutex);
974 spin_lock_init(&cq->lock);
975 cq->resize_buf = NULL;
976 cq->resize_umem = NULL;
977 cq->create_flags = attr->flags;
978 INIT_LIST_HEAD(&cq->list_send_qp);
979 INIT_LIST_HEAD(&cq->list_recv_qp);
982 err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
988 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
993 INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
1000 cq->cqe_size = cqe_size;
1005 cq->private_flags &
1010 MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
1011 if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
1014 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
1018 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
1020 cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
1022 cq->mcq.comp = mlx5_ib_cq_comp;
1023 cq->mcq.event = mlx5_ib_cq_event;
1025 INIT_LIST_HEAD(&cq->wc_list);
1028 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
1038 mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
1043 destroy_cq_user(cq, udata);
1045 destroy_cq_kernel(dev, cq);
1049 int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
1051 struct mlx5_ib_dev *dev = to_mdev(cq->device);
1052 struct mlx5_ib_cq *mcq = to_mcq(cq);
1071 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
1079 if (!cq)
1088 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
1089 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1095 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
1096 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
1097 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1103 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
1104 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
1106 memcpy(dest, cqe, cq->mcq.cqe_sz);
1113 cq->mcq.cons_index += nfreed;
1118 mlx5_cq_set_ci(&cq->mcq);
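
__mlx5_ib_cq_clean() above compacts the ring: it walks backwards over the not-yet-polled CQEs, drops those belonging to the QP or SRQ being cleaned, and slides the survivors toward the producer side before advancing the consumer index. A hypothetical in-memory sketch of that compaction, assuming fixed-size entries and omitting the ownership-bit fixup the driver also performs:

#include <stdint.h>

struct sketch_cqe {
	uint32_t qpn;
};

/* ring has mask + 1 entries; ci/pi are the consumer index and the first
 * index with no software-owned CQE. Returns the new consumer index. */
unsigned int sketch_cq_clean(struct sketch_cqe *ring, unsigned int mask,
			     unsigned int ci, unsigned int pi, uint32_t qpn)
{
	unsigned int nfreed = 0;
	int idx;

	for (idx = (int)pi - 1; idx - (int)ci >= 0; idx--) {
		struct sketch_cqe *cqe = &ring[idx & mask];

		if (cqe->qpn == qpn)
			nfreed++;		/* entry belongs to the dying QP */
		else if (nfreed)
			ring[(idx + nfreed) & mask] = *cqe;	/* slide it up */
	}
	return ci + nfreed;
}
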
1122 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
1124 if (!cq)
1127 spin_lock_irq(&cq->lock);
1128 __mlx5_ib_cq_clean(cq, qpn, srq);
1129 spin_unlock_irq(&cq->lock);
1132 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1134 struct mlx5_ib_dev *dev = to_mdev(cq->device);
1135 struct mlx5_ib_cq *mcq = to_mcq(cq);
1147 mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
1152 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1179 cq->resize_umem = umem;
1185 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1190 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
1191 if (!cq->resize_buf)
1194 err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
1198 init_cq_frag_buf(cq->resize_buf);
1203 kfree(cq->resize_buf);
1207 static int copy_resize_cqes(struct mlx5_ib_cq *cq)
1209 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
1220 ssize = cq->buf.cqe_size;
1221 dsize = cq->resize_buf->cqe_size;
1227 i = cq->mcq.cons_index;
1228 scqe = get_sw_cqe(cq, i);
1237 dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
1238 (i + 1) & cq->resize_buf->nent);
1240 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
1245 scqe = get_sw_cqe(cq, i);
1254 cq->mcq.cqn);
1258 ++cq->mcq.cons_index;
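
copy_resize_cqes() above moves every software-owned CQE from the old buffer into the resized one and recomputes its ownership bit for the new ring size (sw_ownership_bit() in the driver). A hedged sketch of one such copy, assuming op_own sits in the last byte of the CQE as in struct mlx5_cqe64; all names below are hypothetical:

#include <stdint.h>
#include <string.h>

#define SK_OWNER_MASK 0x1

/* new_nent is the new ring's (power-of-two) entry count; the owner bit of
 * the copied CQE is refreshed for that ring, everything else is copied. */
void sketch_copy_resized_cqe(const uint8_t *src, uint8_t *dst,
			     size_t cqe_size, unsigned int new_index,
			     unsigned int new_nent)
{
	uint8_t sw_own = (new_index & new_nent) ? SK_OWNER_MASK : 0;

	memcpy(dst, src, cqe_size);
	dst[cqe_size - 1] = (dst[cqe_size - 1] & ~SK_OWNER_MASK) | sw_own;
}
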
1265 struct mlx5_ib_cq *cq = to_mcq(ibcq);
1297 mutex_lock(&cq->resize_mutex);
1301 err = resize_user(dev, cq, entries, udata, &cqe_size);
1306 cq->resize_umem, cqc, log_page_size,
1313 npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
1319 err = resize_kernel(dev, cq, entries, cqe_size);
1322 frag_buf = &cq->resize_buf->frag_buf;
1338 mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
1341 mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
1356 cq->private_flags &
1361 MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
1363 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
1368 cq->ibcq.cqe = entries - 1;
1369 ib_umem_release(cq->buf.umem);
1370 cq->buf.umem = cq->resize_umem;
1371 cq->resize_umem = NULL;
1376 spin_lock_irqsave(&cq->lock, flags);
1377 if (cq->resize_buf) {
1378 err = copy_resize_cqes(cq);
1380 tbuf = cq->buf;
1381 cq->buf = *cq->resize_buf;
1382 kfree(cq->resize_buf);
1383 cq->resize_buf = NULL;
1387 cq->ibcq.cqe = entries - 1;
1388 spin_unlock_irqrestore(&cq->lock, flags);
1392 mutex_unlock(&cq->resize_mutex);
1401 ib_umem_release(cq->resize_umem);
1403 free_cq_buf(dev, cq->resize_buf);
1404 cq->resize_buf = NULL;
1407 mutex_unlock(&cq->resize_mutex);
1413 struct mlx5_ib_cq *cq;
1418 cq = to_mcq(ibcq);
1419 return cq->cqe_size;
1426 struct mlx5_ib_cq *cq = to_mcq(ibcq);
1434 spin_lock_irqsave(&cq->lock, flags);
1435 list_add_tail(&soft_wc->list, &cq->wc_list);
1436 if (cq->notify_flags == IB_CQ_NEXT_COMP ||
1438 cq->notify_flags = 0;
1439 schedule_work(&cq->notify_work);
1441 spin_unlock_irqrestore(&cq->lock, flags);
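
The soft-completion generator above queues the entry on wc_list and, when the CQ is armed for that kind of completion, clears the arm state and defers the completion callback to a work item (notify_soft_wc_handler via schedule_work()). A minimal state sketch with hypothetical fields standing in for the list, lock and workqueue; "armed" collapses the NEXT_COMP vs SOLICITED distinction visible in the excerpt:

#include <stdbool.h>

struct sketch_soft_cq {
	bool armed;		/* cq->notify_flags set by a prior arm     */
	int  queued;		/* number of entries on cq->wc_list        */
	bool work_scheduled;	/* schedule_work(&cq->notify_work) issued  */
};

void sketch_generate_soft_wc(struct sketch_soft_cq *cq)
{
	cq->queued++;			/* list_add_tail(&soft_wc->list, ...) */
	if (cq->armed) {
		cq->armed = false;	/* cq->notify_flags = 0 */
		cq->work_scheduled = true;
	}
}
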