Searched refs:cq (Results 51 - 75 of 350) sorted by relevance


/linux-master/drivers/net/ethernet/amd/pds_core/
debugfs.c 112 struct pdsc_cq *cq = &qcq->cq; local
137 cq_dentry = debugfs_create_dir("cq", qcq->dentry);
141 debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
142 debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
143 debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
144 debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
145 debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
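The pds_core block above (and the near-identical ionic block later in these results) shows a common debugfs idiom: a per-queue "cq" directory whose read-only files map directly onto live struct fields. Below is a minimal sketch of that idiom using the real debugfs_create_* helpers; struct my_cq and my_cq_debugfs_init() are hypothetical stand-ins, not names from the results.

#include <linux/debugfs.h>
#include <linux/types.h>

struct my_cq {				/* hypothetical container */
	u64 base_pa;
	u32 num_descs;
	u32 desc_size;
	bool done_color;
};

static void my_cq_debugfs_init(struct my_cq *cq, struct dentry *parent)
{
	struct dentry *cq_dentry = debugfs_create_dir("cq", parent);

	/* 0400: readable by the owner only, as in the hits above */
	debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
	debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
	debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
	debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
}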
core.c 132 qcq->cq.bound_intr = &pdsc->intr_info[index];
156 vfree(qcq->cq.info);
174 static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa) argument
179 cq->base = base;
180 cq->base_pa = base_pa;
182 for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
183 cur->comp = base + (i * cq->desc_size);
220 qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
mem.c 163 static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq) argument
170 ret = ib_poll_cq(cq, 1, &wc);
212 struct ib_cq *cq; local
239 cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
240 if (IS_ERR(cq)) {
241 ret = PTR_ERR(cq);
245 qp_init_attr.recv_cq = cq;
246 qp_init_attr.send_cq = cq;
274 ret = test_wc_poll_cq_result(dev, cq);
283 ib_destroy_cq(cq);
[all...]
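Taken together, the mem.c hits walk the standard in-kernel verbs CQ lifecycle: ib_create_cq(), ib_poll_cq(), ib_destroy_cq(). A self-contained sketch of that sequence follows, assuming only the APIs visible in the snippets; example_cq_roundtrip() is a hypothetical wrapper, not a function from the results.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int example_cq_roundtrip(struct ib_device *ibdev)	/* hypothetical */
{
	struct ib_cq_init_attr cq_attr = { .cqe = 128, .comp_vector = 0 };
	struct ib_wc wc;
	struct ib_cq *cq;
	int ret;

	/* NULL handlers: a polled-only CQ, as in the write-combining test */
	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* Harvest at most one completion; 0 means the CQ was empty */
	ret = ib_poll_cq(cq, 1, &wc);

	ib_destroy_cq(cq);
	return ret < 0 ? ret : 0;
}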
Makefile 8 cq.o \
/linux-master/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c 111 struct ehea_cq *cq; local
117 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
118 if (!cq)
121 cq->attr.max_nr_of_cqes = nr_of_cqe;
122 cq->attr.cq_token = cq_token;
123 cq->attr.eq_handle = eq_handle;
125 cq->adapter = adapter;
127 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
128 &cq
192 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force) argument
208 ehea_destroy_cq(struct ehea_cq *cq) argument
[all...]
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.c 58 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
158 struct bnxt_qplib_cq *cq = nq_work->cq; local
161 if (cq && nq) {
162 spin_lock_bh(&cq->compl_lock);
163 if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
165 "%s:Trigger cq = %p event nq = %p\n",
166 __func__, cq, nq);
167 nq->cqn_handler(nq, cq);
169 spin_unlock_bh(&cq
235 clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq) argument
285 __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events) argument
301 struct bnxt_qplib_cq *cq; local
1462 __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) argument
2125 bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) argument
2198 bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) argument
2207 bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq, int new_cqes) argument
2247 bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) argument
2376 do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, u32 cq_cons, u32 swq_last, u32 cqe_sq_cons) argument
2473 bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, struct cq_req *hwcqe, struct bnxt_qplib_cqe **pcqe, int *budget, u32 cq_cons, struct bnxt_qplib_qp **lib_qp) argument
2582 bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq, struct cq_res_rc *hwcqe, struct bnxt_qplib_cqe **pcqe, int *budget) argument
2662 bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq, struct cq_res_ud *hwcqe, struct bnxt_qplib_cqe **pcqe, int *budget) argument
2749 bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) argument
2760 bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, struct cq_res_raweth_qp1 *hwcqe, struct bnxt_qplib_cqe **pcqe, int *budget) argument
2853 bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq, struct cq_terminal *hwcqe, struct bnxt_qplib_cqe **pcqe, int *budget) argument
2955 bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, struct cq_cutoff *hwcqe) argument
2971 bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num_cqes) argument
2994 bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num_cqes, struct bnxt_qplib_qp **lib_qp) argument
3080 bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) argument
[all...]
/linux-master/drivers/net/ethernet/pensando/ionic/
ionic_debugfs.c 97 struct ionic_cq *cq = seq->private; local
99 seq_printf(seq, "%d\n", cq->tail_idx);
123 struct ionic_cq *cq = &qcq->cq; local
167 cq_dentry = debugfs_create_dir("cq", qcq->dentry);
169 debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
170 debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
171 debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
172 debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
174 debugfs_create_file("tail", 0400, cq_dentry, cq,
[all...]
ionic_dev.c 561 struct ionic_cq *cq = &qcq->cq; local
575 .q_init.cq_ring_base = cpu_to_le64(cq->base_pa),
613 int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, argument
626 cq->lif = lif;
627 cq->bound_intr = intr;
628 cq->num_descs = num_descs;
629 cq->desc_size = desc_size;
630 cq->tail_idx = 0;
631 cq
637 ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, ionic_cq_cb cb, ionic_cq_done_cb done_cb, void *done_arg) argument
[all...]
ionic_txrx.c 716 qcq->cq.desc_size -
736 bool ionic_rx_service(struct ionic_cq *cq) argument
739 struct ionic_queue *q = cq->bound_q;
742 comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];
744 if (!color_match(comp->pkt_type_color, cq->done_color))
907 qi = qcq->cq.bound_q->index;
924 dim_update_sample(qcq->cq.bound_intr->rearm_count,
933 struct ionic_cq *cq = napi_to_cq(napi); local
937 work_done = ionic_tx_cq_service(cq, budge
961 ionic_xdp_do_flush(struct ionic_cq *cq) argument
972 struct ionic_cq *cq = napi_to_cq(napi); local
1209 ionic_tx_service(struct ionic_cq *cq, unsigned int *total_pkts, unsigned int *total_bytes) argument
1246 ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) argument
1277 ionic_tx_flush(struct ionic_cq *cq) argument
[all...]
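The done_color, tail_idx, and color_match() hits in the two Pensando drivers all serve one ownership protocol: hardware stamps each completion entry with a color bit, and the driver flips its expected color every time the tail index wraps, so entries left over from the previous lap never match. A compact sketch of that protocol, assuming the color lives in the top bit of the type/color byte (the real mask may differ):

#include <linux/types.h>

#define DEMO_COLOR_BIT 0x80	/* assumed encoding of the color bit */

static bool demo_color_match(u8 color_byte, bool done_color)
{
	return !!(color_byte & DEMO_COLOR_BIT) == done_color;
}

/* Returns true if one completion was consumed, false if the ring is empty */
static bool demo_cq_poll_one(const u8 *comp_colors, u16 *tail_idx,
			     u16 num_descs, bool *done_color)
{
	if (!demo_color_match(comp_colors[*tail_idx], *done_color))
		return false;	/* entry still owned by hardware */

	if (++(*tail_idx) == num_descs) {
		*tail_idx = 0;
		*done_color = !*done_color;	/* new lap, opposite color */
	}
	return true;
}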
/linux-master/drivers/infiniband/hw/cxgb4/
Makefile 7 iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o \
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
health.c 20 void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) argument
26 mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
31 devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
33 devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq));
34 devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq));
38 void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) argument
43 cq_sz = mlx5_cqwq_get_size(&cq->wq);
44 cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq);
health.h 23 void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
24 void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
txrx.h 65 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
70 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
94 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
276 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) argument
280 mcq = &cq->mcq;
281 mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
355 static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn, argument
358 struct mlx5_cqwq *wq = &cq->wq;
363 netdev_err(cq->netdev,
365 cq
[all...]
/linux-master/drivers/infiniband/sw/rxe/
rxe.h 54 #define rxe_dbg_cq(cq, fmt, ...) ibdev_dbg((cq)->ibcq.device, \
55 "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
75 #define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \
76 "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
96 #define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)
[all...]
/linux-master/drivers/infiniband/hw/irdma/
uk.c 888 * irdma_uk_cq_resize - reset the cq buffer info
889 * @cq: cq to resize
890 * @cq_base: new cq buffer addr
893 void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size) argument
895 cq->cq_base = cq_base;
896 cq->cq_size = cq_size;
897 IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
898 cq
906 irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt) argument
936 irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, enum irdma_cmpl_notify cq_notify) argument
970 irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) argument
1472 irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info) argument
1491 irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c 361 mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
362 conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
371 cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
376 mlx5_cqwq_pop(&conn->cq.wq);
378 mlx5_cqwq_update_db_record(&conn->cq.wq);
381 tasklet_schedule(&conn->cq.tasklet);
385 mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
386 /* ensure cq space is freed before enabling more cqes */
393 struct mlx5_fpga_conn *conn = from_tasklet(conn, t, cq.tasklet);
405 conn = container_of(mcq, struct mlx5_fpga_conn, cq
[all...]
/linux-master/drivers/infiniband/hw/qedr/
verbs.c 703 struct qedr_cq *cq, struct ib_udata *udata,
712 uresp.icid = cq->icid;
713 if (cq->q.db_mmap_entry)
715 rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
719 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
724 static void consume_cqe(struct qedr_cq *cq) argument
726 if (cq->latest_cqe == cq->toggle_cqe)
727 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
729 cq
702 qedr_copy_cq_uresp(struct qedr_dev *dev, struct qedr_cq *cq, struct ib_udata *udata, u32 db_offset) argument
840 qedr_init_cq_params(struct qedr_cq *cq, struct qedr_ucontext *ctx, struct qedr_dev *dev, int vector, int chain_entries, int page_cnt, u64 pbl_ptr, struct qed_rdma_create_cq_in_params *params) argument
859 doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags) argument
868 struct qedr_cq *cq = get_qedr_cq(ibcq); local
921 struct qedr_cq *cq = get_qedr_cq(ibcq); local
1064 struct qedr_cq *cq = get_qedr_cq(ibcq); local
1613 struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq); local
4007 is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe) argument
4034 get_cqe(struct qedr_cq *cq) argument
4052 process_req(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status, int force) argument
4107 qedr_poll_cq_req(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_requester *req) argument
4264 __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp, u64 wr_id) argument
4293 process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp) argument
4318 process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, struct ib_wc *wc, struct rdma_cqe_responder *resp) argument
4333 process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, u16 hw_cons) argument
4358 try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp, struct rdma_cqe_responder *resp, int *update) argument
4367 qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_responder *resp) argument
4380 qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp, struct qedr_cq *cq, int num_entries, struct ib_wc *wc, struct rdma_cqe_responder *resp, int *update) argument
4400 try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp, struct rdma_cqe_requester *req, int *update) argument
4412 struct qedr_cq *cq = get_qedr_cq(ibcq); local
[all...]
/linux-master/drivers/infiniband/sw/siw/
siw_qp.c 1037 static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags) argument
1041 if (!cq->base_cq.comp_handler)
1045 cq_notify = READ_ONCE(cq->notify->flags);
1056 WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
1066 struct siw_cq *cq = qp->scq; local
1069 if (cq) {
1075 spin_lock_irqsave(&cq->lock, flags);
1077 idx = cq->cq_put % cq->num_cqe;
1078 cqe = &cq
1124 struct siw_cq *cq = qp->rcq; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
eq.h 8 #include <linux/mlx5/cq.h>
83 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
84 void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
/linux-master/drivers/accel/habanalabs/common/
irq.c 27 * hl_cq_inc_ptr - increment ci or pi of cq
74 * @cq: completion queue
78 static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq, ktime_t timestamp) argument
83 queue = &hdev->kernel_queues[cq->hw_queue_id];
86 queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
132 struct hl_cq *cq = arg; local
133 struct hl_device *hdev = cq->hdev;
142 irq, cq->hw_queue_id);
146 cq_base = cq->kernel_address;
149 cq_entry = (struct hl_cq_entry *) &cq_base[cq
[all...]
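The hl_cq_inc_ptr kernel-doc above describes the usual wrap-around advance of a ring's consumer or producer index. As a one-line sketch, with the ring length passed in rather than taken from the driver's own constant:

#include <linux/types.h>

static inline u32 demo_cq_inc_ptr(u32 ptr, u32 ring_len)
{
	/* step forward, wrapping to slot 0 past the end of the ring */
	return (ptr + 1 == ring_len) ? 0 : ptr + 1;
}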
/linux-master/drivers/infiniband/hw/efa/
efa_com.c 164 struct efa_com_admin_cq *cq = &aq->cq; local
165 u16 size = aq->depth * sizeof(*cq->entries);
170 cq->entries =
171 dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
172 if (!cq->entries)
175 spin_lock_init(&cq->lock);
177 cq->cc = 0;
178 cq->phase = 1;
180 addr_high = upper_32_bits(cq
663 struct efa_com_admin_cq *cq = &aq->cq; local
[all...]
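The efa_com.c hits show admin-CQ bring-up in one stroke: a single coherent DMA allocation sized for the whole ring, then the consumer counter and phase bit reset (the phase bit plays the same ownership role as the color bit sketched earlier). A hedged sketch, with struct demo_admin_cq and demo_admin_cq_init() as hypothetical stand-ins:

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

struct demo_admin_cq {			/* hypothetical container */
	void *entries;
	dma_addr_t dma_addr;
	spinlock_t lock;
	u16 cc;				/* consumer counter */
	u8 phase;			/* phase bit expected on this lap */
};

static int demo_admin_cq_init(struct device *dmadev, struct demo_admin_cq *cq,
			      u16 depth, size_t entry_size)
{
	size_t size = depth * entry_size;

	cq->entries = dma_alloc_coherent(dmadev, size, &cq->dma_addr,
					 GFP_KERNEL);
	if (!cq->entries)
		return -ENOMEM;

	spin_lock_init(&cq->lock);
	cq->cc = 0;		/* nothing consumed yet */
	cq->phase = 1;		/* first pass over the ring expects phase 1 */
	return 0;
}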
/linux-master/drivers/net/ethernet/mellanox/mlx4/
Makefile 4 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
/linux-master/include/rdma/
rdmavt_cq.h 65 bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
cn10k.h 27 int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
/linux-master/drivers/net/ethernet/intel/idpf/
idpf_controlq.h 122 struct idpf_ctlq_info *cq);
124 void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
