/linux-master/drivers/net/ethernet/pensando/ionic/
  ionic.h
      79: bool ionic_notifyq_service(struct ionic_cq *cq);
      80: bool ionic_adminq_service(struct ionic_cq *cq);

/linux-master/drivers/infiniband/hw/ocrdma/
  ocrdma_hw.c
     124: (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
     133: dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
     523: ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev, struct ocrdma_queue_info *cq, struct ocrdma_queue_info *eq)  (argument)
     524: struct ocrdma_queue_info *cq,
     536: cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
     538: cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
     542: cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
     544: ocrdma_build_q_pages(&cmd->pa[0], cq...
     564: ocrdma_mbx_create_mq(struct ocrdma_dev *dev, struct ocrdma_queue_info *mq, struct ocrdma_queue_info *cq)  (argument)
     646: struct ocrdma_queue_info *mbxq, *cq;  (local)
     679: struct ocrdma_cq *cq = NULL;  (local)
     902: _ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, struct ocrdma_cq *cq, bool sq)  (argument)
     935: ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, struct ocrdma_cq *cq)  (argument)
     968: struct ocrdma_cq *cq;  (local)
    1779: ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, int entries, int dpp_cq, u16 pd_id)  (argument)
    1884: ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)  (argument)
    2078: ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)  (argument)
    2091: ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)  (argument)
    2372: struct ocrdma_cq *cq;  (local)
    (further matches elided)
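
The match at ocrdma_hw.c:133 is the classic power-of-two ring-index advance that shows up throughout these drivers: masking with (len - 1) replaces a modulo. A minimal self-contained sketch of the idiom (the names below are illustrative, not taken from ocrdma):

    /* Advance a consumer index around a power-of-two ring. */
    #define MQ_CQ_LEN 256                  /* must be a power of two */

    struct mcq_ring {
        unsigned int tail;                 /* consumer index */
    };

    static void mcq_ring_advance(struct mcq_ring *mq)
    {
        /* (tail + 1) & (len - 1) wraps to 0 at len without a divide */
        mq->tail = (mq->tail + 1) & (MQ_CQ_LEN - 1);
    }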

/linux-master/drivers/infiniband/core/
  uverbs_std_types_wq.c
      35: struct ib_cq *cq = ...  (local)
      72: wq_init_attr.cq = cq;
      82: wq->cq = cq;
      88: atomic_inc(&cq->usecnt);

/linux-master/drivers/isdn/mISDN/
  dsp_core.c
     190: struct mISDN_ctrl_req cq;  (local)
     193: memset(&cq, 0, sizeof(cq));
     222: cq.op = MISDN_CTRL_RX_OFF;
     223: cq.p1 = rx_off;
     224: if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
     257: struct mISDN_ctrl_req cq;  (local)
     259: memset(&cq, 0, sizeof(cq));
     267: cq...
     629: struct mISDN_ctrl_req cq;  (local)
    (further matches elided)
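
The dsp_core.c matches all follow one shape: zero a struct mISDN_ctrl_req on the stack, set the opcode and parameters, then hand it to the peer channel's ->ctrl() hook. Reassembled from the fragments above (lines 190-224); the surrounding error handling in the real function differs:

    struct mISDN_ctrl_req cq;

    memset(&cq, 0, sizeof(cq));
    cq.op = MISDN_CTRL_RX_OFF;     /* opcode: toggle the receive path */
    cq.p1 = rx_off;                /* parameter carried with the request */
    if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
        /* peer rejected the request; rx_off state is unchanged */
    }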

  hwchannel.c
     156: mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)  (argument)
     160: switch (cq->op) {
     162: cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
     166: if (cq->p1) {
     167: memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
     175: cq->p2 = bch->dropcnt;
     176: if (cq->p1)
     183: if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
     184: bch->next_maxlen = cq->p2;
     185: if (cq...
    (further matches elided)

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
  setup.c
     131: &c->xskrq.cq);
     140: &c->xsksq.cq);
     161: mlx5e_close_cq(&c->xsksq.cq);
     167: mlx5e_close_cq(&c->xskrq.cq);
     181: mlx5e_close_cq(&c->xskrq.cq);
     183: mlx5e_close_cq(&c->xsksq.cq);

/linux-master/drivers/infiniband/sw/rxe/
  rxe_verbs.c
    1055: /* cq */
    1061: struct rxe_cq *cq = to_rcq(ibcq);  (local)
    1086: err = rxe_add_to_pool(&rxe->cq_pool, cq);
    1088: rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
    1092: err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
    1095: rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
    1102: cleanup_err = rxe_cleanup(cq);
    1104: rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
    1112: struct rxe_cq *cq...  (local)
    1148: struct rxe_cq *cq = to_rcq(ibcq);  (local)
    1168: struct rxe_cq *cq = to_rcq(ibcq);  (local)
    1178: struct rxe_cq *cq = to_rcq(ibcq);  (local)
    1197: struct rxe_cq *cq = to_rcq(ibcq);  (local)
    (further matches elided)

/linux-master/drivers/infiniband/ulp/rtrs/
  rtrs.c
      21: rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask, struct ib_device *dma_dev, enum dma_data_direction dir, void (*done)(struct ib_cq *cq, struct ib_wc *wc))  (argument)
      24: void (*done)(struct ib_cq *cq, struct ib_wc *wc))
     236: struct ib_cq *cq;  (local)
     239: cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
     242: cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
     244: if (IS_ERR(cq)) {
     246: cq);
     247: return PTR_ERR(cq);
     249: con->cq = cq;
     270: init_attr.send_cq = con->cq;
    (further matches elided)
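
rtrs.c:239/242 shows the two standard ways a ULP obtains a completion queue: a dedicated CQ via ib_alloc_cq(), or a shared one from the core's per-device pool via ib_cq_pool_get(). A hedged sketch of that choice (the `dedicated` flag and the WORKQUEUE polling context are illustrative, error reporting trimmed):

    struct ib_cq *cq;

    if (dedicated)
        cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
                         IB_POLL_WORKQUEUE);
    else
        cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector,
                            IB_POLL_WORKQUEUE);

    if (IS_ERR(cq))
        return PTR_ERR(cq);
    con->cq = cq;                  /* later: init_attr.send_cq = con->cq */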

/linux-master/drivers/infiniband/hw/mana/
  qp.c
     109: struct mana_ib_cq *cq;  (local)
     194: ibcq = ibwq->cq;
     195: cq = container_of(ibcq, struct mana_ib_cq, ibcq);
     200: cq_spec.gdma_region = cq->gdma_region;
     201: cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
     203: eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
     216: cq->gdma_region = GDMA_INVALID_DMA_REGION;
     219: cq->id = cq_spec.queue_index;
     222: "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
     223: ret, wq->rx_object, wq->id, cq...
    (further matches elided)

/linux-master/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_main.c
     312: struct pvrdma_cq *cq;  (local)
     316: cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
     317: if (cq)
     318: refcount_inc(&cq->refcnt);
     321: if (cq && cq->ibcq.event_handler) {
     322: struct ib_cq *ibcq = &cq->ibcq;
     326: e.element.cq = ibcq;
     330: if (cq) {
     331: if (refcount_dec_and_test(&cq...
     483: struct pvrdma_cq *cq;  (local)
    (further matches elided)
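
The pvrdma_main.c fragments (312-331) sketch a common CQ lifetime pattern: look the CQ up in a table, take a temporary reference before delivering the event, and let the last refcount_dec_and_test() wake whoever is tearing the CQ down. Reassembled approximately; the `cq->free` completion is an assumption about the struct layout:

    cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
    if (cq)
        refcount_inc(&cq->refcnt);        /* pin across the callback */

    if (cq && cq->ibcq.event_handler) {
        struct ib_cq *ibcq = &cq->ibcq;
        /* ... fill a struct ib_event and invoke the handler ... */
    }

    if (cq) {
        if (refcount_dec_and_test(&cq->refcnt))
            complete(&cq->free);          /* last ref: destroyer may proceed */
    }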

/linux-master/drivers/infiniband/hw/hfi1/
  user_sdma.c
      50: struct hfi1_user_sdma_comp_q *cq,
     112: struct hfi1_user_sdma_comp_q *cq;  (local)
     161: cq = kzalloc(sizeof(*cq), GFP_KERNEL);
     162: if (!cq)
     165: cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
     167: if (!cq->comps)
     170: cq->nentries = hfi1_sdma_comp_ring_size;
     177: fd->cq...
     285: struct hfi1_user_sdma_comp_q *cq = fd->cq;  (local)
    1160: struct hfi1_user_sdma_comp_q *cq;  (local)
    1213: set_comp_state(struct hfi1_user_sdma_pkt_q *pq, struct hfi1_user_sdma_comp_q *cq, u16 idx, enum hfi1_sdma_comp_state state, int ret)  (argument)
    (further matches elided)
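
user_sdma.c (161-170) allocates the completion ring with vmalloc_user(), which returns zeroed, page-aligned memory suitable for remapping into userspace later. Stitching the fragments together; the error-path details are assumed:

    cq = kzalloc(sizeof(*cq), GFP_KERNEL);
    if (!cq)
        return -ENOMEM;

    /* user-mappable, zero-filled ring of completion entries */
    cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps) *
                                        hfi1_sdma_comp_ring_size));
    if (!cq->comps) {
        kfree(cq);
        return -ENOMEM;
    }
    cq->nentries = hfi1_sdma_comp_ring_size;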

/linux-master/drivers/infiniband/hw/bnxt_re/
  ib_verbs.c
    1358: struct bnxt_re_cq *cq;  (local)
    1395: cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
    1396: qplqp->scq = &cq->qplib_cq;
    1397: qp->scq = cq;
    1401: cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
    1402: qplqp->rcq = &cq->qplib_cq;
    1403: qp->rcq = cq;
    1686: if (qplib_srq->cq)
    1687: nq = qplib_srq->cq->nq;
    2930: struct bnxt_re_cq *cq;  (local)
    2953: struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);  (local)
    3072: bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)  (argument)
    3095: struct bnxt_re_cq *cq;  (local)
    3668: struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);  (local)
    3807: struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);  (local)
    4253: struct bnxt_re_cq *cq = NULL, *tmp_cq;  (local)
    4492: struct bnxt_re_cq *cq;  (local)
    (further matches elided)

/linux-master/drivers/infiniband/hw/mlx5/
  gsi.c
      68: static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)  (argument)
      70: struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
     131: gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
     133: if (IS_ERR(gsi->cq)) {
     135: PTR_ERR(gsi->cq));
     136: ret = PTR_ERR(gsi->cq);
     141: hw_init_attr.send_cq = gsi->cq;
     160: ib_free_cq(gsi->cq);
     192: ib_free_cq(gsi->cq);
     205: .send_cq = gsi->cq,
    (further matches elided)

/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
  cn10k.c
      85: aq->sq.cq = pfvf->hw.rx_queues + qidx;
     110: int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)  (argument)
     113: int cnt = cq->pool_ptrs;
     119: while (cq->pool_ptrs) {
     120: if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
     122: __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
     126: cq->pool_ptrs--;
     129: if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
     130: __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
     135: return cnt - cq...
    (further matches elided)
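
cn10k_refill_pool_ptrs() (110-135) batches freed buffer pointers and flushes them to the NPA aura NPA_MAX_BURST at a time. A plausible reading of the fragments; the loop body is reconstructed, not verbatim:

    while (cq->pool_ptrs) {
        if (otx2_alloc_buffer(pfvf, cq, &bufptr))
            break;                        /* out of buffers; retry later */
        ptrs[num_ptrs++] = bufptr;
        cq->pool_ptrs--;

        /* flush a full burst, or whatever remains at the end */
        if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
            __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs, num_ptrs);
            num_ptrs = 0;
        }
    }
    return cnt - cq->pool_ptrs;           /* how many were refilled */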

/linux-master/drivers/infiniband/hw/qedr/
  qedr_roce_cm.c
      75: struct qedr_cq *cq = dev->gsi_sqcq;  (local)
      82: cq->ibcq.comp_handler ? "Yes" : "No");
      92: if (cq->ibcq.comp_handler)
      93: (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
     100: struct qedr_cq *cq = dev->gsi_rqcq;  (local)
     121: if (cq->ibcq.comp_handler)
     122: (*cq->ibcq.comp_handler) (&cq...
     137: struct qedr_cq *cq;  (local)
     673: struct qedr_cq *cq = get_qedr_cq(ibcq);  (local)
    (further matches elided)

/linux-master/drivers/infiniband/hw/efa/
  efa_verbs.c
    1008: static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)  (argument)
    1010: rdma_user_mmap_entry_remove(cq->db_mmap_entry);
    1011: rdma_user_mmap_entry_remove(cq->mmap_entry);
    1017: struct efa_cq *cq = to_ecq(ibcq);  (local)
    1020: "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
    1021: cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
    1023: efa_destroy_cq_idx(dev, cq...
    1039: cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, struct efa_ibv_create_cq_resp *resp, bool db_valid)  (argument)
    1080: struct efa_cq *cq = to_ecq(ibcq);  (local)
    (further matches elided)

/linux-master/drivers/infiniband/ulp/iser/
  iscsi_iser.h
     360: * @cq: Connection completion queue
     370: struct ib_cq *cq;  (member in struct ib_conn)
     497: void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
     498: void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
     499: void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
     500: void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
     501: void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
     502: void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
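
The iser prototypes (497-502) are all CQE completion handlers with the canonical `void (*done)(struct ib_cq *cq, struct ib_wc *wc)` signature. The usual pattern embeds a struct ib_cqe in the descriptor and recovers it with container_of(); a generic sketch, where my_desc and my_send_done are hypothetical names:

    struct my_desc {
        struct ib_cqe cqe;                /* wr.wr_cqe points here */
        /* ... buffers, DMA state ... */
    };

    static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
    {
        struct my_desc *desc =
            container_of(wc->wr_cqe, struct my_desc, cqe);

        if (unlikely(wc->status != IB_WC_SUCCESS))
            pr_err("send: %s\n", ib_wc_status_msg(wc->status));
        /* recycle desc ... */
    }

    /* before posting: desc->cqe.done = my_send_done;
     *                 send_wr.wr_cqe  = &desc->cqe;
     */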

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
  dr_send.c
     535: * the function will drain the cq till it empty.
     552: ne = dr_poll_cq(send_ring->cq, 1);
    1066: struct mlx5dr_cq *cq;  (local)
    1073: cq = kzalloc(sizeof(*cq), GFP_KERNEL);
    1074: if (!cq)
    1083: err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
    1084: &cq->wq_ctrl);
    1088: for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
    1089: cqe = mlx5_cqwq_get_wqe(&cq...
    1148: dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)  (argument)
    (further matches elided)

/linux-master/drivers/net/ethernet/microsoft/mana/
  gdma_main.c
     290: e.cq.id = qid;
     291: e.cq.tail_ptr = tail_ptr;
     292: e.cq.arm = num_req;
     332: void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)  (argument)
     334: struct gdma_context *gc = cq->gdma_dev->gdma_context;
     336: u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
     338: u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
     340: mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq...
     352: struct gdma_queue *cq;  (local)
    1164: mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)  (argument)
    1196: mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)  (argument)
    (further matches elided)
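
gdma_main.c:336-338 hints at how GDMA validates completions: cq->head counts completions modulo (num_cqe << GDMA_CQE_OWNER_BITS), so the owner field stamped into each CQE records which "lap" of the ring wrote it. A simplified sketch of the idea, not the driver's exact checks (the constant width is illustrative):

    #define OWNER_BITS 3
    #define OWNER_MASK ((1u << OWNER_BITS) - 1)

    /* A CQE at slot (head % num_cqe) is fresh iff its owner stamp
     * matches the lap counter (head / num_cqe) for the current pass;
     * a stale stamp means hardware has not written that slot yet.
     */
    static bool cqe_is_fresh(u32 head, u32 num_cqe, u32 cqe_owner)
    {
        return cqe_owner == ((head / num_cqe) & OWNER_MASK);
    }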

  hw_channel.c
     115: hwc->cq->gdma_eq->id = eq_db.eq_id;
     126: hwc->cq->gdma_cq->id = val;
     283: spec.cq.context = ctx;
     284: spec.cq.callback = cb;
     285: spec.cq.parent_eq = parent_eq;
     357: struct gdma_queue *eq, *cq;  (local)
     383: eq, &cq);
     388: hwc_cq->gdma_cq = cq;
     613: return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
     623: struct gdma_queue *eq = hwc->cq...
     624: struct gdma_queue *cq = hwc->cq->gdma_cq;  (local)
    (further matches elided)

  mana_en.c
     234: struct mana_cq *cq;  (local)
     245: cq = &apc->tx_qp[txq_idx].tx_cq;
     248: pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
    1348: static void mana_poll_tx_cq(struct mana_cq *cq)  (argument)
    1350: struct gdma_comp *completions = cq->gdma_comp_buf;
    1354: struct mana_txq *txq = cq->txq;
    1368: comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
    1429: napi_consume_skb(skb, cq->budget);
    1459: cq->work_done = pkt_transmitted;
    1676: static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,  (argument)
    1743: mana_poll_rx_cq(struct mana_cq *cq)  (argument)
    1777: struct mana_cq *cq = context;  (local)
    1804: struct mana_cq *cq = container_of(napi, struct mana_cq, napi);  (local)
    1817: struct mana_cq *cq = context;  (local)
    1822: mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)  (argument)
    1877: struct mana_cq *cq;  (local)
    2162: struct mana_cq *cq = NULL;  (local)
    (further matches elided)

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
  ptp.c
     222: static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)  (argument)
     224: struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
     232: cqwq = &cq->wq;
     250: /* ensure cq space is freed before enabling more cqes */
     277: busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
     282: work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
     302: mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
     307: mlx5e_cq_arm(&rq->cq);
     471: csp.cqn = txqsq->cq.mcq.cqn;
     570: struct mlx5e_cq *cq...  (local)
     578: struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;  (local)
     607: struct mlx5e_cq *cq = &c->rq.cq;  (local)
    (further matches elided)
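
The ptp.c matches at 277-307 trace the canonical NAPI shape: poll the TX and RX CQs against the budget and, only when the budget was not exhausted, complete NAPI and re-arm the CQs so interrupts fire again. A generic sketch with the mlx5e-flavored names replaced by hypothetical ones:

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
        struct my_channel *c = container_of(napi, struct my_channel, napi);
        int work_done;
        bool busy;

        busy = my_poll_tx_cq(&c->tx_cq, budget);      /* true if more left */
        work_done = my_poll_rx_cq(&c->rx_cq, budget);
        busy |= (work_done == budget);

        if (busy)
            return budget;                 /* stay scheduled for another pass */

        if (napi_complete_done(napi, work_done)) {
            my_cq_arm(&c->tx_cq);          /* re-enable completion interrupts */
            my_cq_arm(&c->rx_cq);
        }
        return work_done;
    }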

/linux-master/drivers/infiniband/hw/erdma/
  erdma_verbs.c
     186: static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)  (argument)
     188: struct erdma_dev *dev = to_edev(cq->ibcq.device);
     196: req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
     197: FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
     198: req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);
     200: if (rdma_is_kernel_res(&cq->ibcq.res)) {
     204: req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
     205: req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
     213: cq->kern_cq.qbuf_dma_addr + (cq...
    1224: struct erdma_cq *cq = to_ecq(ibcq);  (local)
    1577: erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq, struct erdma_ureq_create_cq *ureq)  (argument)
    1598: erdma_init_kernel_cq(struct erdma_cq *cq)  (argument)
    1621: struct erdma_cq *cq = to_ecq(ibcq);  (local)
    (further matches elided)
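
create_cq_cmd() (196-198) packs the CQ number, log2 depth, and EQ binding into command words with FIELD_PREP from <linux/bitfield.h>. The same idiom in isolation; the masks below are made up, erdma's real ones live in its hardware header:

    #include <linux/bitfield.h>
    #include <linux/log2.h>

    #define CMD_CQN_MASK    GENMASK(19, 0)    /* illustrative layout */
    #define CMD_DEPTH_MASK  GENMASK(24, 20)

    /* depth is stored as log2, so the field stays narrow */
    u32 cfg0 = FIELD_PREP(CMD_CQN_MASK, cqn) |
               FIELD_PREP(CMD_DEPTH_MASK, ilog2(depth));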

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
  eq.c
      93: /* caller must eventually call mlx5_cq_put on the returned cq */
      97: struct mlx5_core_cq *cq = NULL;  (local)
     100: cq = radix_tree_lookup(&table->tree, cqn);
     101: if (likely(cq))
     102: mlx5_cq_hold(cq);
     105: return cq;
     124: struct mlx5_core_cq *cq;  (local)
     133: cq = mlx5_eq_cq_get(eq, cqn);
     134: if (likely(cq)) {
     135: ++cq...
     407: mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)  (argument)
     419: mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)  (argument)
     505: struct mlx5_core_cq *cq;  (local)
    (further matches elided)
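
The eq.c fragments (93-105) reconstruct almost completely: each EQ keeps a radix tree of the CQs it can raise events for, and the lookup takes a reference that the caller must drop with mlx5_cq_put(). Stitched together; the table field name and lock flavor are assumptions:

    /* caller must eventually call mlx5_cq_put() on the returned cq */
    static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
    {
        struct cq_table *table = &eq->cq_table;   /* field name assumed */
        struct mlx5_core_cq *cq = NULL;

        spin_lock(&table->lock);                  /* lock flavor assumed */
        cq = radix_tree_lookup(&table->tree, cqn);
        if (likely(cq))
            mlx5_cq_hold(cq);                     /* pin for the caller */
        spin_unlock(&table->lock);

        return cq;
    }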

  debugfs.c
      35: #include <linux/mlx5/cq.h>
     411: static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,  (argument)
     424: err = mlx5_core_query_cq(dev, cq, out);
     426: mlx5_core_warn(dev, "failed to query cq\n");
     433: param = cq->pid;
     578: int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)  (argument)
     586: &cq->dbg, cq->cqn, cq_fields,
     587: ARRAY_SIZE(cq_fields), cq);
     589: cq...
     594: mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)  (argument)
    (further matches elided)