Searched refs:cq (Results 1 - 25 of 350) sorted by relevance

/linux-master/drivers/net/ethernet/cisco/enic/
vnic_cq.c
16 void vnic_cq_free(struct vnic_cq *cq) argument
18 vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
20 cq->ctrl = NULL;
23 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, argument
26 cq->index = index;
27 cq->vdev = vdev;
29 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
30 if (!cq->ctrl) {
35 return vnic_dev_alloc_desc_ring(vdev, &cq
38 vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, unsigned int cq_tail_color, unsigned int interrupt_enable, unsigned int cq_entry_enable, unsigned int cq_message_enable, unsigned int interrupt_offset, u64 cq_message_addr) argument
63 vnic_cq_clean(struct vnic_cq *cq) argument
[all...]
vnic_cq.h
59 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, argument
70 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
71 cq->ring.desc_size * cq->to_clean);
75 while (color != cq->last_color) {
77 if ((*q_service)(cq->vdev, cq_desc, type,
81 cq->to_clean++;
82 if (cq->to_clean == cq->ring.desc_count) {
83 cq
[all...]
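
The enic hits above all follow one scheme: every completion descriptor carries a color bit that the hardware flips on each pass over the ring, so software can spot new entries without reading a shared producer index. A minimal, self-contained sketch of that service loop is below; struct cq_entry and struct cq_state are invented stand-ins for the driver's cq_desc/vnic_cq, and the descriptor decode and callback signature are simplified.

#include <stdbool.h>

/* Invented stand-ins for the driver's descriptor and queue state. */
struct cq_entry {
	bool color;            /* toggled by "hardware" on each ring pass */
	unsigned int data;     /* payload the service callback consumes   */
};

struct cq_state {
	struct cq_entry *ring;
	unsigned int desc_count;
	unsigned int to_clean;   /* next index software will examine      */
	bool last_color;         /* the color that still means "stale"    */
};

/*
 * Drain at most work_to_do fresh entries.  An entry is fresh while its
 * color differs from last_color; when to_clean wraps, the expected color
 * flips, mirroring the wrap handling in vnic_cq_service() above.
 */
static unsigned int cq_service(struct cq_state *cq,
			       void (*handle)(unsigned int data),
			       unsigned int work_to_do)
{
	unsigned int done = 0;
	struct cq_entry *e = &cq->ring[cq->to_clean];

	while (e->color != cq->last_color && done < work_to_do) {
		handle(e->data);

		if (++cq->to_clean == cq->desc_count) {
			cq->to_clean = 0;
			cq->last_color = !cq->last_color;
		}
		e = &cq->ring[cq->to_clean];
		done++;
	}
	return done;
}
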
/linux-master/drivers/scsi/fnic/
vnic_cq_copy.h
12 struct vnic_cq *cq,
23 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
24 cq->ring.desc_size * cq->to_clean);
27 while (color != cq->last_color) {
29 if ((*q_service)(cq->vdev, cq->index, desc))
32 cq->to_clean++;
33 if (cq->to_clean == cq
11 vnic_cq_copy_service( struct vnic_cq *cq, int (*q_service)(struct vnic_dev *vdev, unsigned int index, struct fcpio_fw_req *desc), unsigned int work_to_do) argument
[all...]
vnic_cq.c
12 void vnic_cq_free(struct vnic_cq *cq) argument
14 vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
16 cq->ctrl = NULL;
19 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, argument
24 cq->index = index;
25 cq->vdev = vdev;
27 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
28 if (!cq->ctrl) {
33 err = vnic_dev_alloc_desc_ring(vdev, &cq
40 vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, unsigned int cq_tail_color, unsigned int interrupt_enable, unsigned int cq_entry_enable, unsigned int cq_message_enable, unsigned int interrupt_offset, u64 cq_message_addr) argument
63 vnic_cq_clean(struct vnic_cq *cq) argument
[all...]
vnic_cq.h
58 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, argument
69 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
70 cq->ring.desc_size * cq->to_clean);
74 while (color != cq->last_color) {
76 if ((*q_service)(cq->vdev, cq_desc, type,
80 cq->to_clean++;
81 if (cq->to_clean == cq->ring.desc_count) {
82 cq
[all...]
/linux-master/drivers/scsi/snic/
vnic_cq_fw.h
10 vnic_cq_fw_service(struct vnic_cq *cq, argument
21 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
22 cq->ring.desc_size * cq->to_clean);
25 while (color != cq->last_color) {
27 if ((*q_service)(cq->vdev, cq->index, desc))
30 cq->to_clean++;
31 if (cq->to_clean == cq
[all...]
vnic_cq.c
10 void svnic_cq_free(struct vnic_cq *cq) argument
12 svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
14 cq->ctrl = NULL;
17 int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, argument
20 cq->index = index;
21 cq->vdev = vdev;
23 cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);
24 if (!cq->ctrl) {
30 return svnic_dev_alloc_desc_ring(vdev, &cq
33 svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, unsigned int cq_tail_color, unsigned int interrupt_enable, unsigned int cq_entry_enable, unsigned int cq_message_enable, unsigned int interrupt_offset, u64 cq_message_addr) argument
56 svnic_cq_clean(struct vnic_cq *cq) argument
[all...]
vnic_cq.h
46 static inline unsigned int svnic_cq_service(struct vnic_cq *cq, argument
57 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
58 cq->ring.desc_size * cq->to_clean);
62 while (color != cq->last_color) {
64 if ((*q_service)(cq->vdev, cq_desc, type,
68 cq->to_clean++;
69 if (cq->to_clean == cq->ring.desc_count) {
70 cq
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx4/
en_cq.c
34 #include <linux/mlx4/cq.h>
40 static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) argument
52 struct mlx4_en_cq *cq; local
55 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
56 if (!cq) {
61 cq->size = entries;
62 cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
64 cq
89 mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int cq_idx) argument
176 struct mlx4_en_cq *cq = *pcq; local
189 mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) argument
200 mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) argument
206 mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) argument
[all...]
cq.c
41 #include <linux/mlx4/cq.h>
82 static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) argument
84 struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
94 if (list_empty_careful(&cq->tasklet_ctx.list)) {
95 refcount_inc(&cq->refcount);
97 list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
106 struct mlx4_cq *cq; local
109 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
113 if (!cq) {
121 ++cq
129 struct mlx4_cq *cq; local
169 mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, u16 count, u16 period) argument
191 mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, int entries, struct mlx4_mtt *mtt) argument
342 mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed, int timestamp_en, void *buf_addr, bool user_cq) argument
436 mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) argument
[all...]
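
The mlx4 (and, further down, mlx5) completion paths shown here share one trick: when an EQ event arrives, the CQ is appended to a per-EQ tasklet list at most once, and a reference is taken so the CQ cannot be freed before the deferred handler runs. The sketch below models only that queue-once / hold-reference bookkeeping in plain C; it uses an array instead of the kernel's intrusive list, a bare counter instead of refcount_t, and omits the spinlock the real drivers hold around the list.

#define MAX_PENDING 64

struct model_cq {
	int refcount;                    /* models refcount_inc()/put     */
	int queued;                      /* models list_empty_careful()   */
	void (*comp)(struct model_cq *cq);
};

struct tasklet_ctx {
	struct model_cq *pending[MAX_PENDING];
	int count;
};

/* Interrupt path: queue the CQ at most once and pin it while queued. */
static void add_cq_to_tasklet(struct tasklet_ctx *ctx, struct model_cq *cq)
{
	if (cq->queued || ctx->count == MAX_PENDING)
		return;
	cq->refcount++;                  /* dropped after ->comp() runs   */
	cq->queued = 1;
	ctx->pending[ctx->count++] = cq;
}

/* Tasklet body: run each pending completion handler, then unpin. */
static void run_cq_tasklet(struct tasklet_ctx *ctx)
{
	for (int i = 0; i < ctx->count; i++) {
		struct model_cq *cq = ctx->pending[i];

		cq->comp(cq);
		cq->queued = 0;
		cq->refcount--;
	}
	ctx->count = 0;
}
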
/linux-master/drivers/net/ethernet/intel/idpf/
idpf_controlq_setup.c
9 * @cq: pointer to the specific Control queue
12 struct idpf_ctlq_info *cq)
14 size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
16 cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
17 if (!cq->desc_ring.va)
26 * @cq: pointer to the specific Control queue
32 struct idpf_ctlq_info *cq)
37 if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
43 cq
11 idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
31 idpf_ctlq_alloc_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
90 idpf_ctlq_free_desc_ring(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
104 idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
135 idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
150 idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
[all...]
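
The idpf_controlq_setup.c hits above are allocation plumbing: a DMA descriptor ring sized as ring_size * sizeof(desc), a buffer per descriptor, and matching free paths. The sketch below reproduces only that alloc-then-unwind shape with ordinary heap memory; struct ctlq_desc, struct ctlq_info and the helper name are invented, and real DMA-coherent allocation is out of scope here.

#include <stdlib.h>
#include <errno.h>

struct ctlq_desc { unsigned char raw[32]; };   /* placeholder descriptor */

struct ctlq_info {
	int ring_size;               /* number of descriptors             */
	size_t buf_size;             /* payload buffer per descriptor     */
	struct ctlq_desc *descs;     /* models cq->desc_ring.va           */
	void **bufs;
};

/* Allocate the ring and per-descriptor buffers, undoing everything on
 * failure, much like the alloc_desc_ring/alloc_bufs/dealloc split above. */
static int ctlq_alloc_ring_res(struct ctlq_info *cq)
{
	cq->descs = calloc(cq->ring_size, sizeof(*cq->descs));
	if (!cq->descs)
		return -ENOMEM;

	cq->bufs = calloc(cq->ring_size, sizeof(*cq->bufs));
	if (!cq->bufs)
		goto err_ring;

	for (int i = 0; i < cq->ring_size; i++) {
		cq->bufs[i] = malloc(cq->buf_size);
		if (!cq->bufs[i])
			goto err_bufs;
	}
	return 0;

err_bufs:
	for (int i = 0; i < cq->ring_size; i++)
		free(cq->bufs[i]);          /* free(NULL) is a no-op */
	free(cq->bufs);
	cq->bufs = NULL;
err_ring:
	free(cq->descs);
	cq->descs = NULL;
	return -ENOMEM;
}
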
idpf_controlq.c
8 * @cq: pointer to the specific control queue
11 static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq, argument
15 cq->reg.head = q_create_info->reg.head;
16 cq->reg.tail = q_create_info->reg.tail;
17 cq->reg.len = q_create_info->reg.len;
18 cq->reg.bah = q_create_info->reg.bah;
19 cq->reg.bal = q_create_info->reg.bal;
20 cq->reg.len_mask = q_create_info->reg.len_mask;
21 cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
22 cq
34 idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq) argument
61 idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) argument
97 idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
127 struct idpf_ctlq_info *cq; local
198 idpf_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq) argument
220 struct idpf_ctlq_info *cq, *tmp; local
249 struct idpf_ctlq_info *cq, *tmp; local
267 idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg, struct idpf_ctlq_msg q_msg[]) argument
355 idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, struct idpf_ctlq_msg *msg_status[]) argument
420 idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 *buff_count, struct idpf_dma_mem **buffs) argument
543 idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, struct idpf_ctlq_msg *q_msg) argument
[all...]
/linux-master/drivers/infiniband/core/
cq.c
42 struct ib_cq *cq = dim->priv; local
49 trace_cq_modify(cq, comps, usec);
50 cq->device->ops.modify_cq(cq, comps, usec);
53 static void rdma_dim_init(struct ib_cq *cq) argument
57 if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
58 cq->poll_ctx == IB_POLL_DIRECT)
68 dim->priv = cq;
69 cq
74 rdma_dim_destroy(struct ib_cq *cq) argument
83 __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) argument
92 __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, int batch) argument
138 ib_process_cq_direct(struct ib_cq *cq, int budget) argument
146 ib_cq_completion_direct(struct ib_cq *cq, void *private) argument
153 struct ib_cq *cq = container_of(iop, struct ib_cq, iop); local
172 ib_cq_completion_softirq(struct ib_cq *cq, void *private) argument
180 struct ib_cq *cq = container_of(work, struct ib_cq, work); local
192 ib_cq_completion_workqueue(struct ib_cq *cq, void *private) argument
220 struct ib_cq *cq; local
318 ib_free_cq(struct ib_cq *cq) argument
353 struct ib_cq *cq, *n; local
372 struct ib_cq *cq, *n; local
434 struct ib_cq *cq, *found = NULL; local
498 ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe) argument
[all...]
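
The drivers/infiniband/core/cq.c hits (__ib_process_cq and the direct, softirq and workqueue completion handlers) all funnel into one budgeted poll loop: pull work completions in small batches, invoke each completion's callback, and stop once the budget is spent or the queue drains. A rough standalone sketch of that shape follows; struct wc and the poll() callback are invented stand-ins for ib_wc and ib_poll_cq(), and error handling is omitted.

#define POLL_BATCH 16

struct wc {                               /* models struct ib_wc      */
	void (*done)(struct wc *wc);
	int status;
};

/* poll() fills up to n entries and returns how many it produced
 * (0 when the queue is currently empty). */
static int process_cq(int (*poll)(struct wc *wcs, int n), int budget)
{
	struct wc wcs[POLL_BATCH];
	int completed = 0;

	while (completed < budget) {
		int want = budget - completed;
		int n;

		if (want > POLL_BATCH)
			want = POLL_BATCH;

		n = poll(wcs, want);
		if (n <= 0)
			break;

		for (int i = 0; i < n; i++)
			wcs[i].done(&wcs[i]);

		completed += n;
		if (n < want)
			break;            /* queue drained before budget */
	}
	return completed;
}
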
/linux-master/drivers/infiniband/sw/rxe/
rxe_cq.c
11 int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, argument
27 if (cq) {
28 count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
30 rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
42 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, argument
50 cq->queue = rxe_queue_init(rxe, &cqe,
52 if (!cq->queue) {
53 rxe_dbg_dev(rxe, "unable to create cq\n");
58 cq->queue->buf, cq
72 rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct rxe_resize_cq_resp __user *uresp, struct ib_udata *udata) argument
88 rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) argument
129 struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); local
[all...]
/linux-master/drivers/net/ethernet/intel/ice/
ice_controlq.c
36 struct ice_ctl_q_info *cq = &hw->adminq; local
38 ICE_CQ_INIT_REGS(cq, PF_FW);
49 struct ice_ctl_q_info *cq = &hw->mailboxq; local
51 ICE_CQ_INIT_REGS(cq, PF_MBX);
62 struct ice_ctl_q_info *cq = &hw->sbq; local
64 ICE_CQ_INIT_REGS(cq, PF_SB);
70 * @cq: pointer to the specific Control queue
74 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
77 if (cq->sq.len && cq
91 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
122 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
158 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
230 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
298 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
310 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
361 ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
421 ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
474 ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
540 ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
576 struct ice_ctl_q_info *cq = &hw->adminq; local
612 struct ice_ctl_q_info *cq; local
692 struct ice_ctl_q_info *cq; local
787 ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) argument
825 ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) argument
858 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
937 ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) argument
958 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) argument
1156 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) argument
[all...]
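
The ice control-queue hits show the submission half of the picture: sending a command means writing a descriptor at the send queue's tail, bumping the tail register, and polling until the hardware-owned head index catches up (ice_sq_done) or a timeout expires. The sketch below models that handshake with plain memory instead of registers; the types, the poll-count constant and the missing delay between polls are all simplifications.

#include <stdbool.h>

struct aq_desc { unsigned short opcode; unsigned short flags; };

struct ctl_sq {
	struct aq_desc *descs;
	int count;
	int next_to_use;             /* software tail                    */
	volatile int hw_head;        /* advanced by "hardware"/firmware  */
};

#define SQ_CMD_POLLS 1000

static bool sq_done(const struct ctl_sq *sq)
{
	/* everything submitted has been consumed when head == tail */
	return sq->hw_head == sq->next_to_use;
}

static int sq_send_cmd(struct ctl_sq *sq, const struct aq_desc *desc)
{
	sq->descs[sq->next_to_use] = *desc;
	sq->next_to_use = (sq->next_to_use + 1) % sq->count;
	/* a real driver writes next_to_use to the tail register here */

	for (int i = 0; i < SQ_CMD_POLLS; i++) {
		if (sq_done(sq))
			return 0;
		/* a real driver sleeps or delays between polls */
	}
	return -1;                   /* timed out waiting for firmware */
}
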
/linux-master/drivers/infiniband/sw/rdmavt/
cq.c
8 #include "cq.h"
16 * @cq: completion queue
23 * false if cq is full.
25 bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited) argument
36 spin_lock_irqsave(&cq->lock, flags);
38 if (cq->ip) {
39 u_wc = cq->queue;
44 k_wc = cq->kqueue;
54 if (head >= (unsigned)cq->ibcq.cqe) {
55 head = cq
121 struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask); local
163 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); local
278 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); local
304 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); local
340 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); local
478 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); local
[all...]
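
rvt_cq_enter() above is the producer side of a software completion queue: take the lock, write the work completion at head, advance head with wraparound, and report overflow if the next head would collide with the consumer's tail. The fragment below keeps only that index arithmetic; locking, the separate kernel/user queue layouts and the completion notification are left out, and the types are invented.

#include <stdbool.h>

struct cqe { unsigned long long wr_id; int status; };

struct cq_ring {
	struct cqe *entries;
	unsigned int size;       /* number of slots                  */
	unsigned int head;       /* producer: next slot to fill      */
	unsigned int tail;       /* consumer: next slot to read      */
};

static bool cq_enter(struct cq_ring *cq, const struct cqe *e)
{
	unsigned int next = cq->head + 1;

	if (next >= cq->size)
		next = 0;
	if (next == cq->tail)
		return false;     /* full: the driver flags a CQ error */

	cq->entries[cq->head] = *e;
	cq->head = next;
	return true;
}
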
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
cq.c
37 #include <linux/mlx5/cq.h>
69 static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, argument
73 struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
81 if (list_empty_careful(&cq->tasklet_ctx.list)) {
82 mlx5_cq_hold(cq);
83 list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
89 int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, argument
108 cq->cqn = MLX5_GET(create_cq_out, out, cqn);
109 cq->cons_index = 0;
110 cq
154 mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen) argument
163 mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) argument
170 mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); local
188 mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *out) argument
199 mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen) argument
210 mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u16 cq_period, u16 cq_max_count) argument
[all...]
/linux-master/drivers/infiniband/hw/mana/
cq.c
11 struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); local
27 cq->comp_vector = attr->comp_vector;
32 "Failed to copy from udata for create cq, %d\n", err);
41 cq->cqe = attr->cqe;
42 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
44 if (IS_ERR(cq->umem)) {
45 err = PTR_ERR(cq->umem);
46 ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
51 err = mana_ib_create_zero_offset_dma_region(mdev, cq
77 struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); local
105 struct mana_ib_cq *cq = ctx; local
111 mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq) argument
[all...]
/linux-master/drivers/infiniband/hw/mlx4/
cq.c
34 #include <linux/mlx4/cq.h>
43 static void mlx4_ib_cq_comp(struct mlx4_cq *cq) argument
45 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
49 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) argument
56 "on CQ %06x\n", type, cq->cqn);
60 ibcq = &to_mibcq(cq)->ibcq;
64 event.element.cq = ibcq;
74 static void *get_cqe(struct mlx4_ib_cq *cq, int n) argument
76 return get_cqe_from_buf(&cq->buf, n);
79 static void *get_sw_cqe(struct mlx4_ib_cq *cq, in argument
88 next_cqe_sw(struct mlx4_ib_cq *cq) argument
93 mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) argument
181 struct mlx4_ib_cq *cq = to_mcq(ibcq); local
290 mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, int entries) argument
314 mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, int entries, struct ib_udata *udata) argument
343 mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) argument
354 mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq) argument
382 struct mlx4_ib_cq *cq = to_mcq(ibcq); local
477 mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) argument
580 use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, unsigned tail, struct mlx4_cqe *cqe, int is_eth) argument
631 mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, struct ib_wc *wc, int *npolled) argument
656 mlx4_ib_poll_one(struct mlx4_ib_cq *cq, struct mlx4_ib_qp **cur_qp, struct ib_wc *wc) argument
879 struct mlx4_ib_cq *cq = to_mcq(ibcq); local
915 __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) argument
968 mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) argument
[all...]
/linux-master/drivers/infiniband/hw/mthca/
mthca_cq.c
169 static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) argument
171 return get_cqe_from_buf(&cq->buf, entry);
179 static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) argument
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
201 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
204 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, argument
208 *cq->set_ci_db = cpu_to_be32(cq
219 struct mthca_cq *cq; local
236 struct mthca_cq *cq; local
273 mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, struct mthca_srq *srq) argument
325 mthca_cq_resize_copy_cqes(struct mthca_cq *cq) argument
372 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) argument
478 mthca_poll_one(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp **cur_qp, int *freed, struct ib_wc *entry) argument
658 struct mthca_cq *cq = to_mcq(ibcq); local
723 mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) argument
738 struct mthca_cq *cq = to_mcq(ibcq); local
768 mthca_init_cq(struct mthca_dev *dev, int nent, struct mthca_ucontext *ctx, u32 pdn, struct mthca_cq *cq) argument
889 get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) argument
900 mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq) argument
[all...]
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
67 struct pvrdma_cq *cq = to_vcq(ibcq); local
68 u32 val = cq->cq_handle;
75 spin_lock_irqsave(&cq->cq_lock, flags);
82 has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
83 cq->ibcq.cqe, &head);
88 spin_unlock_irqrestore(&cq->cq_lock, flags);
107 struct pvrdma_cq *cq = to_vcq(ibcq); local
132 cq->ibcq.cqe = entries;
133 cq->is_kernel = !udata;
135 if (!cq
225 pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq) argument
241 pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) argument
270 get_cqe(struct pvrdma_cq *cq, int i) argument
278 _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq) argument
322 pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp, struct ib_wc *wc) argument
388 struct pvrdma_cq *cq = to_vcq(ibcq); local
[all...]
/linux-master/include/linux/mlx5/
cq.h
49 void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
57 void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
141 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) argument
143 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
151 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, argument
159 sn = cq->arm_sn & 3;
162 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
170 doorbell[1] = cpu_to_be32(cq->cqn);
175 static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) argument
180 mlx5_cq_put(struct mlx5_core_cq *cq) argument
[all...]
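
The include/linux/mlx5/cq.h hits show how a consumer re-arms a hardware CQ: the low 24 bits of the consumer index go into the set_ci doorbell, and the arm doorbell packs a 2-bit sequence number, the arm command and the consumer index into one big-endian word, with the CQ number in a second word. The sketch below only reconstructs those two doorbell values as the snippet composes them; the byte swap assumes a little-endian host, and the actual 64-bit UAR write plus the required memory barriers are omitted.

#include <stdint.h>

/* Unconditional byte swap: equivalent to cpu_to_be32() on a
 * little-endian host (an assumption of this sketch). */
static uint32_t be32_model(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static void build_cq_arm_doorbell(uint32_t doorbell[2], uint32_t cqn,
				  uint32_t cons_index, uint32_t cmd,
				  uint32_t arm_sn)
{
	uint32_t sn = arm_sn & 3;                /* as in mlx5_cq_arm()    */
	uint32_t ci = cons_index & 0xffffff;     /* as in mlx5_cq_set_ci() */

	doorbell[0] = be32_model(sn << 28 | cmd | ci);
	doorbell[1] = be32_model(cqn);
}
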
/linux-master/drivers/isdn/mISDN/
dsp_hwec.c
38 struct mISDN_ctrl_req cq; local
82 memset(&cq, 0, sizeof(cq));
83 cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;
84 cq.p1 = deftaps;
85 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
94 struct mISDN_ctrl_req cq; local
103 memset(&cq, 0, sizeof(cq));
104 cq
[all...]
/linux-master/drivers/infiniband/sw/siw/
siw_cq.c
48 int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) argument
53 spin_lock_irqsave(&cq->lock, flags);
55 cqe = &cq->queue[cq->cq_get % cq->num_cqe];
66 if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
74 siw_dbg_cq(cq,
76 cq->cq_get % cq->num_cqe, cqe->opcode,
100 cq
116 siw_cq_flush(struct siw_cq *cq) argument
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
cq.c
41 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe) argument
43 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
50 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); local
51 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
52 struct ib_cq *ibcq = &cq->ibcq;
64 event.element.cq = ibcq;
69 static void *get_cqe(struct mlx5_ib_cq *cq, int n) argument
71 return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
79 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) argument
81 void *cqe = get_cqe(cq,
94 next_cqe_sw(struct mlx5_ib_cq *cq) argument
427 mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries, struct ib_wc *wc, int *npolled) argument
447 mlx5_poll_one(struct mlx5_ib_cq *cq, struct mlx5_ib_qp **cur_qp, struct ib_wc *wc) argument
581 poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, struct ib_wc *wc, bool is_fatal_err) argument
609 struct mlx5_ib_cq *cq = to_mcq(ibcq); local
647 struct mlx5_ib_cq *cq = to_mcq(ibcq); local
715 create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, struct mlx5_ib_cq *cq, int entries, u32 **cqb, int *cqe_size, int *index, int *inlen) argument
857 destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata) argument
879 create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size, u32 **cqb, int *index, int *inlen) argument
930 destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) argument
938 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, local
951 struct mlx5_ib_cq *cq = to_mcq(ibcq); local
1049 mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) argument
1071 __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) argument
1122 mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) argument
1132 mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) argument
1152 resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, struct ib_udata *udata, int *cqe_size) argument
1185 resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size) argument
1207 copy_resize_cqes(struct mlx5_ib_cq *cq) argument
1265 struct mlx5_ib_cq *cq = to_mcq(ibcq); local
1413 struct mlx5_ib_cq *cq; local
1426 struct mlx5_ib_cq *cq = to_mcq(ibcq); local
[all...]

Completed in 218 milliseconds
