Searched refs:cq (Results 151 - 175 of 350), sorted by relevance

/linux-master/drivers/scsi/fnic/
fnic_res.c
237 vnic_cq_free(&fnic->cq[i]);
268 "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n",
304 &fnic->cq[cq_index], cq_index,
314 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
325 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
407 vnic_cq_init(&fnic->cq[i],
/linux-master/drivers/infiniband/hw/mlx5/
umr.c
139 struct ib_cq *cq; local
149 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
150 if (IS_ERR(cq)) {
152 ret = PTR_ERR(cq);
156 init_attr.send_cq = cq;
157 init_attr.recv_cq = cq;
175 dev->umrc.cq = cq;
187 ib_free_cq(cq);
198 ib_free_cq(dev->umrc.cq);
273 mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc) argument
[all...]
/linux-master/drivers/infiniband/hw/mthca/
mthca_dev.h
241 struct mthca_array cq; member in struct:mthca_cq_table
485 int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
486 int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
489 struct mthca_cq *cq);
491 struct mthca_cq *cq);
495 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
497 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
/linux-master/drivers/net/ethernet/cisco/enic/
enic_main.c
1303 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; local
1420 enic_intr_update_pkt_size(&cq->pkt_size_counter,
1449 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; local
1450 u32 timer = cq->tobe_rx_coal_timeval;
1452 if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
1454 cq->cur_rx_coal_timeval = cq
1461 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; local
1608 unsigned int cq; local
1637 unsigned int cq = enic_cq_rq(enic, rq); local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
reporter_tx.c
118 mlx5e_trigger_napi_sched(sq->cq.napi);
141 eq = sq->cq.mcq.eq;
143 err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
244 mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
245 mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
283 mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
497 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
/linux-master/drivers/isdn/mISDN/
l1oip_core.c
933 channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) argument
938 switch (cq->op) {
940 cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER
944 hc->remoteip = (u32)cq->p1;
945 hc->remoteport = cq->p2 & 0xffff;
946 hc->localport = cq->p2 >> 16;
965 cq->p1 = hc->remoteip;
966 cq->p2 = hc->remoteport | (hc->localport << 16);
970 __func__, cq->op);
1173 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) argument
[all...]
/linux-master/drivers/infiniband/hw/bnxt_re/
main.c
1105 static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq) argument
1124 if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) {
1125 ibevent.element.cq = &cq->ib_cq;
1126 ibevent.device = &cq->rdev->ibdev;
1128 ibdev_dbg(&cq->rdev->ibdev,
1130 cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context);
1142 struct bnxt_re_cq *cq; local
1158 cq
1209 struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq, local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_main.c
905 pp_params.napi = rq->cq.napi;
1052 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
1107 struct mlx5_cqwq *cqwq = &rq->cq.wq;
1783 csp.cqn = sq->cq.mcq.cqn;
1811 netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
1880 csp.cqn = sq->cq.mcq.cqn;
1940 csp.cqn = sq->cq.mcq.cqn;
2005 struct mlx5e_cq *cq)
2007 struct mlx5_core_cq *mcq = &cq->mcq;
2011 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq
2001 mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, struct net_device *netdev, struct workqueue_struct *workqueue, struct mlx5e_cq_param *param, struct mlx5e_cq *cq) argument
2039 mlx5e_alloc_cq(struct mlx5_core_dev *mdev, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) argument
2058 mlx5e_free_cq(struct mlx5e_cq *cq) argument
2063 mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) argument
2111 mlx5e_destroy_cq(struct mlx5e_cq *cq) argument
2116 mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) argument
2140 mlx5e_close_cq(struct mlx5e_cq *cq) argument
3338 mlx5e_alloc_drop_cq(struct mlx5e_priv *priv, struct mlx5e_cq *cq, struct mlx5e_cq_param *param) argument
3356 struct mlx5e_cq *cq = &drop_rq->cq; local
[all...]
en_tx.c
785 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) argument
796 sq = container_of(cq, struct mlx5e_txqsq, cq);
801 cqe = mlx5_cqwq_get_cqe(&cq->wq);
811 * otherwise a cq overrun may occur
825 mlx5_cqwq_pop(&cq->wq);
862 mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
865 queue_work(cq->workqueue, &sq->recover_work);
870 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
874 mlx5_cqwq_update_db_record(&cq
[all...]
en.h
42 #include <linux/mlx5/cq.h>
441 struct mlx5e_cq cq; member in struct:mlx5e_txqsq
507 struct mlx5e_cq cq; member in struct:mlx5e_xdpsq
542 struct mlx5e_cq cq; member in struct:mlx5e_icosq
708 struct mlx5e_cq cq; member in struct:mlx5e_rq
824 struct mlx5e_ptp_cq_stats cq[MLX5_MAX_NUM_TC]; member in struct:mlx5e_ptp_stats
1061 struct mlx5e_cq *cq);
1062 void mlx5e_close_cq(struct mlx5e_cq *cq);
/linux-master/drivers/scsi/lpfc/
lpfc_sli.c
80 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
91 struct lpfc_queue *cq,
593 struct lpfc_queue *cq = NULL, *childq = NULL; local
601 cq = NULL;
605 cq = childq;
610 if (cq) {
611 cqe = lpfc_sli4_cq_get(cq);
613 __lpfc_sli4_consume_cqe(phba, cq, cqe);
615 cqe = lpfc_sli4_cq_get(cq);
618 phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_coun
709 __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
14438 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
14468 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_complete *wcqe) argument
14552 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct sli4_wcqe_xri_aborted *wcqe) argument
14746 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
14808 struct lpfc_queue *cq = NULL, *childq; local
14865 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_cqe *), unsigned long *delay) argument
14945 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) argument
15005 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); local
15019 struct lpfc_queue *cq = container_of(to_delayed_work(work), local
15035 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_complete *wcqe) argument
15115 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_wcqe_release *wcqe) argument
15152 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_rcqe *rcqe) argument
15293 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) argument
15359 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) argument
15399 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); local
15422 struct lpfc_queue *cq = NULL; local
15515 struct lpfc_queue *cq = container_of(to_delayed_work(work), local
16265 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_queue *eq, uint32_t type, uint32_t subtype) argument
16416 struct lpfc_queue *cq; local
16655 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) argument
16721 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, struct lpfc_queue *cq, uint32_t subtype) argument
16876 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, struct lpfc_queue *cq, uint32_t subtype) argument
17140 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) argument
17434 struct lpfc_queue *hrq, *drq, *cq; local
17671 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) argument
[all...]
/linux-master/drivers/infiniband/hw/irdma/
hw.c
58 * @iwcq: iwarp cq receiving event
62 struct irdma_cq *cq = iwcq->back_cq; local
64 if (!cq->user_mode)
65 atomic_set(&cq->armed, 0);
66 if (cq->ibcq.comp_handler)
67 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
73 * @cq: puda completion q for event
76 struct irdma_sc_cq *cq)
75 irdma_puda_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) argument
109 struct irdma_sc_cq *cq; local
2075 irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) argument
2133 struct irdma_sc_cq *cq = &rf->ccq.sc_cq; local
[all...]
/linux-master/drivers/infiniband/hw/efa/
efa_main.c
78 struct efa_cq *cq; local
81 cq = xa_load(&dev->cqs_xa, cqn);
82 if (unlikely(!cq)) {
89 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
/linux-master/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
333 static inline void ehea_inc_cq(struct ehea_cq *cq) argument
335 hw_qeit_inc(&cq->hw_queue);
367 int ehea_destroy_cq(struct ehea_cq *cq);
/linux-master/drivers/infiniband/sw/rxe/
rxe_verbs.h
443 static inline struct rxe_cq *to_rcq(struct ib_cq *cq) argument
445 return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
/linux-master/drivers/infiniband/core/
Makefile
10 ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
/linux-master/include/net/mana/
hw_channel.h
182 struct hwc_cq *cq; member in struct:hw_channel_context
/linux-master/tools/testing/selftests/bpf/
xskxceiver.h
75 struct xsk_ring_cons cq; member in struct:xsk_umem_info
/linux-master/drivers/infiniband/hw/hfi1/
user_sdma.h
128 struct hfi1_user_sdma_comp_q *cq; member in struct:user_sdma_request
/linux-master/drivers/net/ethernet/mellanox/mlx4/
en_netdev.c
50 #include <linux/mlx4/cq.h>
1406 struct mlx4_en_cq *cq; local
1422 /* Setup cq moderation params */
1424 cq = priv->rx_cq[i];
1425 cq->moder_cnt = priv->rx_frames;
1426 cq->moder_time = priv->rx_usecs;
1434 cq = priv->tx_cq[t][i];
1435 cq->moder_cnt = priv->tx_frames;
1436 cq->moder_time = priv->tx_usecs;
1455 struct mlx4_en_cq *cq; local
1632 struct mlx4_en_cq *cq; local
2008 struct mlx4_en_cq *cq = priv->rx_cq[i]; local
[all...]
/linux-master/drivers/net/ethernet/amazon/ena/
ena_com.c
112 struct ena_com_admin_cq *cq = &admin_queue->cq; local
115 cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
117 if (!cq->entries) {
122 cq->head = 0;
123 cq->phase = 1;
468 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
469 phase = admin_queue->cq.phase;
471 cqe = &admin_queue->cq
1630 struct ena_com_admin_cq *cq = &admin_queue->cq; local
[all...]
/linux-master/drivers/infiniband/ulp/rtrs/
rtrs-pri.h
92 struct ib_cq *cq; member in struct:rtrs_con
299 void (*done)(struct ib_cq *cq, struct ib_wc *wc));
/linux-master/drivers/infiniband/hw/mlx4/
mlx4_ib.h
51 #include <linux/mlx4/cq.h>
102 struct ib_cq *cq; member in struct:mlx4_ib_xrcd
482 struct ib_cq *cq; member in struct:mlx4_ib_demux_pv_ctx
767 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
771 int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
773 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
774 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
775 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
/linux-master/net/xdp/
xsk_diag.c
73 if (!err && pool && pool->cq)
74 err = xsk_diag_put_ring(pool->cq,
/linux-master/drivers/vfio/pci/mlx5/
cmd.h
13 #include <linux/mlx5/cq.h>
163 struct mlx5_vhca_cq cq; member in struct:mlx5_vhca_page_tracker

Completed in 414 milliseconds