Searched refs:cqp (Results 1 - 19 of 19) sorted by relevance

/linux-master/drivers/infiniband/hw/irdma/
uda.h
39 int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
41 int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
50 static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp, argument
53 return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
57 static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp, argument
60 return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
64 static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp, argument
68 return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
72 static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp, argument
76 return irdma_access_mcast_grp(cqp, inf
80 irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp, struct irdma_mcast_grp_info *info, u64 scratch) argument
[all...]
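
The uda.h hits above all follow one idiom: each address-handle and multicast verb is a thin static-inline wrapper that forwards to a single accessor (irdma_sc_access_ah() or irdma_access_mcast_grp()) with a per-verb CQP opcode. Below is a minimal self-contained model of that opcode-dispatch pattern; only the idiom is taken from the hits, and every name in the sketch is invented for illustration.

    #include <stdio.h>

    /* Hypothetical stand-ins for the driver types and opcodes. */
    enum cqp_op {
            OP_CREATE_ADDR_HANDLE,
            OP_DESTROY_ADDR_HANDLE,
    };

    struct sc_cqp { int id; };
    struct ah_info { int ah_idx; };

    /* One accessor does the WQE work; the verb is only an opcode. */
    static int access_ah(struct sc_cqp *cqp, struct ah_info *info,
                         enum cqp_op op, unsigned long long scratch)
    {
            printf("cqp %d: op %d on ah %d, scratch 0x%llx\n",
                   cqp->id, (int)op, info->ah_idx, scratch);
            return 0;
    }

    /* Thin wrappers, mirroring irdma_sc_create_ah()/irdma_sc_destroy_ah(). */
    static inline int create_ah(struct sc_cqp *cqp, struct ah_info *info,
                                unsigned long long scratch)
    {
            return access_ah(cqp, info, OP_CREATE_ADDR_HANDLE, scratch);
    }

    static inline int destroy_ah(struct sc_cqp *cqp, struct ah_info *info,
                                 unsigned long long scratch)
    {
            return access_ah(cqp, info, OP_DESTROY_ADDR_HANDLE, scratch);
    }

    int main(void)
    {
            struct sc_cqp cqp = { .id = 0 };
            struct ah_info ah = { .ah_idx = 7 };

            create_ah(&cqp, &ah, 0x1234ULL);
            destroy_ah(&cqp, &ah, 0x1234ULL);
            return 0;
    }
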
ctrl.c
62 /* issue cqp suspend command */
176 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
177 * @cqp: struct for cqp hw
179 * @scratch: u64 saved to be used during cqp completion
180 * @post_sq: flag for cqp db to ring
182 static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp, argument
189 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
199 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
207 irdma_sc_cqp_post_sq(cqp);
219 irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch, u16 arp_index, bool post_sq) argument
252 irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp, struct irdma_apbvt_info *info, u64 scratch, bool post_sq) argument
300 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp, struct irdma_qhash_table_info *info, u64 scratch, bool post_sq) argument
451 struct irdma_sc_cqp *cqp; local
503 struct irdma_sc_cqp *cqp; local
573 struct irdma_sc_cqp *cqp; local
761 irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch, bool post_sq) argument
795 irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp, struct irdma_local_mac_entry_info *info, u64 scratch, bool post_sq) argument
833 irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch, u16 entry_idx, u8 ignore_ref_count, bool post_sq) argument
1060 struct irdma_sc_cqp *cqp; local
1125 struct irdma_sc_cqp *cqp; local
1216 struct irdma_sc_cqp *cqp; local
1255 struct irdma_sc_cqp *cqp; local
2047 irdma_sc_gather_stats(struct irdma_sc_cqp *cqp, struct irdma_stats_gather_info *info, u64 scratch) argument
2094 irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp, struct irdma_stats_inst_info *info, bool alloc, u64 scratch) argument
2131 irdma_sc_set_up_map(struct irdma_sc_cqp *cqp, struct irdma_up_info *info, u64 scratch) argument
2173 irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp, struct irdma_ws_node_info *info, enum irdma_ws_node_op node_op, u64 scratch) argument
2221 struct irdma_sc_cqp *cqp; local
2295 struct irdma_sc_cqp *cqp; local
2334 struct irdma_sc_cqp *cqp; local
2369 irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp, struct irdma_cqp_manage_push_page_info *info, u64 scratch, bool post_sq) argument
2408 irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, u64 scratch) argument
2438 irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, u64 scratch) argument
2520 struct irdma_sc_cqp *cqp; local
2591 struct irdma_sc_cqp *cqp; local
2659 struct irdma_sc_cqp *cqp; local
2732 irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val, u32 *tail, u32 *error) argument
2746 irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail, u32 count) argument
3088 irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, struct irdma_cqp_init_info *info) argument
3136 (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity); local
3146 irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err) argument
3244 irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp) argument
3260 irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch, u32 *wqe_idx) argument
3291 irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp) argument
3350 struct irdma_sc_cqp *cqp; local
3414 irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code, struct irdma_ccq_cqe_info *compl_info) argument
3456 irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp, struct irdma_hmc_fcn_info *info, u64 scratch, bool post_sq) argument
3498 irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp) argument
3513 irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id, struct irdma_dma_mem *commit_fpm_mem, bool post_sq, u8 wait_type) argument
3559 irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp) argument
3574 irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id, struct irdma_dma_mem *query_fpm_mem, bool post_sq, u8 wait_type) argument
3668 struct irdma_sc_cqp *cqp; local
3709 struct irdma_sc_cqp *cqp; local
3722 struct irdma_sc_cqp *cqp; local
3766 struct irdma_sc_cqp *cqp; local
3933 struct irdma_sc_cqp *cqp; local
3972 struct irdma_sc_cqp *cqp; local
4219 struct irdma_sc_cqp *cqp; local
4260 struct irdma_sc_cqp *cqp; local
4413 cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info, u64 scratch) argument
4495 struct irdma_sc_cqp *cqp = dev->cqp; local
4514 struct irdma_sc_cqp *cqp = dev->cqp; local
4536 irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id, bool post_sq, bool poll_registers) argument
4582 irdma_cqp_ring_full(struct irdma_sc_cqp *cqp) argument
4621 irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp) argument
4634 irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp, struct irdma_dma_mem *buf, u64 scratch) argument
[all...]
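
Nearly every ctrl.c hit shares the three-step send path visible at lines 189-207 of the snippet: reserve a slot with irdma_sc_cqp_get_next_send_wqe(), stamp the header valid bit from cqp->polarity via FIELD_PREP(IRDMA_CQPSQ_WQEVALID, ...), then ring the doorbell with irdma_sc_cqp_post_sq(). The polarity bit flips on every ring wrap, so hardware can tell fresh descriptors from stale ones. The following is a toy, self-contained model of that handshake; the ring size and header layout are invented.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 4

    struct wqe { uint64_t hdr; };

    struct cqp_ring {
            struct wqe q[RING_SIZE];
            unsigned int tail;
            unsigned int polarity;  /* flips on every wrap */
    };

    /* Reserve the next WQE and report which valid-bit value to stamp. */
    static struct wqe *get_next_send_wqe(struct cqp_ring *cqp,
                                         unsigned int *valid)
    {
            struct wqe *wqe = &cqp->q[cqp->tail];

            *valid = cqp->polarity;
            if (++cqp->tail == RING_SIZE) {
                    cqp->tail = 0;
                    cqp->polarity ^= 1;
            }
            return wqe;
    }

    /* Fill the body first, set the valid bit last; the real driver
     * inserts a write barrier here before ringing the doorbell. */
    static void post_wqe(struct wqe *wqe, uint64_t body, unsigned int valid)
    {
            wqe->hdr = (body << 1) | valid;
    }

    int main(void)
    {
            struct cqp_ring ring = { .polarity = 1 };
            unsigned int valid;

            for (uint64_t i = 0; i < 6; i++) {
                    struct wqe *wqe = get_next_send_wqe(&ring, &valid);

                    post_wqe(wqe, i, valid);
                    printf("wqe %llu: hdr=0x%llx (valid=%u)\n",
                           (unsigned long long)i,
                           (unsigned long long)wqe->hdr, valid);
            }
            return 0;
    }
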
uda.c
15 * @cqp: struct for cqp hw
18 * @scratch: u64 saved to be used during cqp completion
20 int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info, argument
26 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
69 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
78 irdma_sc_cqp_post_sq(cqp);
110 * @cqp: Control QP
113 * @scratch: u64 saved to be used during cqp completion
115 int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, argument
[all...]
hw.c
587 * Issue destroy cqp request and
588 * free the resources associated with the cqp
593 struct irdma_cqp *cqp = &rf->cqp; local
596 status = irdma_sc_cqp_destroy(dev->cqp);
601 dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
602 cqp->sq.pa);
603 cqp->sq.va = NULL;
604 kfree(cqp
933 struct irdma_cqp *cqp = &rf->cqp; local
[all...]
utils.c
425 * irdma_alloc_and_get_cqp_request - get cqp struct
426 * @cqp: device cqp ptr
427 * @wait: cqp to be used in wait mode
429 struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp, argument
435 spin_lock_irqsave(&cqp->req_lock, flags);
436 if (!list_empty(&cqp->cqp_avail_reqs)) {
437 cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
441 spin_unlock_irqrestore(&cqp->req_lock, flags);
451 ibdev_dbg(to_ibdev(cqp
476 irdma_free_cqp_request(struct irdma_cqp *cqp, struct irdma_cqp_request *cqp_request) argument
500 irdma_put_cqp_request(struct irdma_cqp *cqp, struct irdma_cqp_request *cqp_request) argument
513 irdma_free_pending_cqp_request(struct irdma_cqp *cqp, struct irdma_cqp_request *cqp_request) argument
534 struct irdma_cqp *cqp = &rf->cqp; local
867 struct irdma_sc_cqp *cqp = dev->cqp; local
1892 struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp; local
[all...]
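
The utils.c hits outline the request pool: irdma_alloc_and_get_cqp_request() takes cqp->req_lock, pops the first entry off cqp->cqp_avail_reqs, and irdma_put_cqp_request() returns it (the real driver also reference-counts each request). Below is a self-contained model of that lock-protected free list, with a pthread mutex standing in for spin_lock_irqsave(); pool size and all names are illustrative. Build with -lpthread.

    #include <pthread.h>
    #include <stdio.h>

    #define POOL_SIZE 4

    struct cqp_request {
            struct cqp_request *next;       /* free-list link */
            int in_use;
    };

    struct cqp_pool {
            struct cqp_request reqs[POOL_SIZE];
            struct cqp_request *avail;      /* cqp_avail_reqs analogue */
            pthread_mutex_t req_lock;       /* req_lock analogue */
    };

    static void pool_init(struct cqp_pool *cqp)
    {
            pthread_mutex_init(&cqp->req_lock, NULL);
            cqp->avail = NULL;
            for (int i = 0; i < POOL_SIZE; i++) {
                    cqp->reqs[i].next = cqp->avail;
                    cqp->avail = &cqp->reqs[i];
            }
    }

    /* Analogue of irdma_alloc_and_get_cqp_request(): pop under the lock. */
    static struct cqp_request *alloc_and_get_request(struct cqp_pool *cqp)
    {
            struct cqp_request *req;

            pthread_mutex_lock(&cqp->req_lock);
            req = cqp->avail;
            if (req) {
                    cqp->avail = req->next;
                    req->in_use = 1;
            }
            pthread_mutex_unlock(&cqp->req_lock);
            return req;     /* NULL means the pool is exhausted */
    }

    /* Analogue of irdma_put_cqp_request(): push back under the lock. */
    static void put_request(struct cqp_pool *cqp, struct cqp_request *req)
    {
            pthread_mutex_lock(&cqp->req_lock);
            req->in_use = 0;
            req->next = cqp->avail;
            cqp->avail = req;
            pthread_mutex_unlock(&cqp->req_lock);
    }

    int main(void)
    {
            struct cqp_pool pool;

            pool_init(&pool);
            struct cqp_request *req = alloc_and_get_request(&pool);
            printf("got request %p\n", (void *)req);
            put_request(&pool, req);
            return 0;
    }
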
type.h
604 struct irdma_sc_cqp *cqp; member in struct:irdma_sc_dev
668 struct irdma_sc_cqp *cqp; member in struct:irdma_ccq_cqe_info
1192 int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
1193 int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
1194 int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
1196 void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
1197 int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
1224 int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
1292 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon944
1298 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon945
1304 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon946
1310 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon947
1317 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon948
1322 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon949
1372 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon958
1380 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon959
1388 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon960
1394 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon961
1406 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon963
1412 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon964
1418 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon965
1424 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon966
1430 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon967
1436 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon968
1442 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon969
1448 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon970
1454 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon971
1460 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon972
1466 struct irdma_sc_cqp *cqp; member in struct:cqp_info::__anon933::__anon973
1488 irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch) argument
[all...]
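
The long run of anonymous-union members in type.h (lines 1292-1466 above) is the cqp_info input descriptor: one union whose per-command structs each begin with their own struct irdma_sc_cqp *cqp, so a single dispatcher can be handed any command. A minimal tagged-union model of that layout follows; the command names and fields are invented.

    #include <stdio.h>

    struct sc_cqp { int id; };

    enum cqp_cmd_op { CQP_OP_SUSPEND_QP, CQP_OP_RESUME_QP };

    /* One input struct per command, each carrying its own cqp pointer,
     * collected in a union keyed by the opcode — the cqp_info shape. */
    struct cqp_cmd_info {
            enum cqp_cmd_op op;
            union {
                    struct {
                            struct sc_cqp *cqp;
                            int qp_id;
                    } suspend_qp;
                    struct {
                            struct sc_cqp *cqp;
                            int qp_id;
                    } resume_qp;
            } u;
    };

    static int dispatch(struct cqp_cmd_info *info)
    {
            switch (info->op) {
            case CQP_OP_SUSPEND_QP:
                    printf("suspend qp %d on cqp %d\n",
                           info->u.suspend_qp.qp_id,
                           info->u.suspend_qp.cqp->id);
                    return 0;
            case CQP_OP_RESUME_QP:
                    printf("resume qp %d on cqp %d\n",
                           info->u.resume_qp.qp_id,
                           info->u.resume_qp.cqp->id);
                    return 0;
            }
            return -1;
    }

    int main(void)
    {
            struct sc_cqp cqp = { .id = 0 };
            struct cqp_cmd_info info = {
                    .op = CQP_OP_SUSPEND_QP,
                    .u.suspend_qp = { .cqp = &cqp, .qp_id = 3 },
            };

            return dispatch(&info);
    }
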
protos.h
18 void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
19 __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
77 int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
puda.c
607 struct irdma_sc_cqp *cqp; local
613 cqp = dev->cqp;
614 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
626 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
633 irdma_sc_cqp_post_sq(cqp);
634 status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
726 struct irdma_sc_cqp *cqp; local
731 cqp = dev->cqp;
[all...]
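
The puda.c hits show the synchronous variant of the command path: fill and post the WQE, then block in irdma_sc_poll_for_cqp_op_done() until the CCQ reports the matching opcode. Here is a toy bounded polling loop; the completion source is simulated, and neither the real CCQ format nor the driver's timeout handling is modeled.

    #include <stdio.h>

    #define MAX_POLLS 16

    /* Simulated completion word: 0 means "not done yet". */
    static int ccq_word;

    static int hw_tick(void)        /* stand-in for hardware progress */
    {
            static int ticks;

            if (++ticks == 3)
                    ccq_word = 1;   /* completion arrives on the 3rd poll */
            return ccq_word;
    }

    /* Analogue of irdma_sc_poll_for_cqp_op_done(): spin with a bound,
     * returning 0 on completion and -1 on timeout. */
    static int poll_for_op_done(void)
    {
            for (int i = 0; i < MAX_POLLS; i++) {
                    if (hw_tick())
                            return 0;
            }
            return -1;
    }

    int main(void)
    {
            /* ... post the WQE and ring the doorbell, then: */
            if (poll_for_op_done())
                    printf("CQP op timed out\n");
            else
                    printf("CQP op completed\n");
            return 0;
    }
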
main.h
303 struct irdma_cqp cqp; member in struct:irdma_pci_f
482 struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
484 void irdma_free_cqp_request(struct irdma_cqp *cqp,
486 void irdma_put_cqp_request(struct irdma_cqp *cqp,
hmc.c
115 * irdma_hmc_sd_one - setup 1 sd entry for cqp
134 return dev->cqp->process_cqp_sds(dev, &sdinfo);
138 * irdma_hmc_sd_grp - setup group of sd entries for cqp
173 ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
185 ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
339 ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
348 ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
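
In the hmc.c hits, SD programming is routed through a function pointer, dev->cqp->process_cqp_sds(dev, &sdinfo), so the same HMC code can post through whichever backend the cqp object was wired with. A minimal model of that indirection, with all names invented:

    #include <stdio.h>

    struct dev;

    /* The cqp object carries the SD-programming hook, as in
     * dev->cqp->process_cqp_sds(dev, &sdinfo). */
    struct sc_cqp {
            int (*process_cqp_sds)(struct dev *dev, int sd_cnt);
    };

    struct dev {
            struct sc_cqp *cqp;
    };

    static int process_sds_direct(struct dev *dev, int sd_cnt)
    {
            (void)dev;
            printf("programming %d SDs via the CQP\n", sd_cnt);
            return 0;
    }

    int main(void)
    {
            struct sc_cqp cqp = { .process_cqp_sds = process_sds_direct };
            struct dev dev = { .cqp = &cqp };

            return dev.cqp->process_cqp_sds(&dev, 2);
    }
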
verbs.c
237 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
249 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
259 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
739 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
755 irdma_put_cqp_request(&rf->cqp, cqp_request);
1977 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1995 irdma_put_cqp_request(&rf->cqp, cqp_request);
2197 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2210 irdma_put_cqp_request(&rf->cqp, cqp_reques
[all...]
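
The verbs.c call sites all share one bracketed flow: take a request from the pool, fill its cqp_info input (line 249 shows the manage_push_page case), submit, then release the reference. Below is a sketch of that shape using the names visible above; the cqp_request->info access, the fill, and the submit step are elided in the hits and assumed here.

    /* Sketch only, not a complete driver function. */
    static void example_push_page(struct irdma_device *iwdev)
    {
            struct irdma_cqp_request *cqp_request;
            struct cqp_info *cqp_info;

            cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
            if (!cqp_request)
                    return;         /* pool exhausted */

            cqp_info = &cqp_request->info;
            cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
            /* ... fill the remaining manage_push_page fields and submit;
             * the "true" wait flag above selects synchronous completion
             * (see the @wait kernel-doc in the utils.c hit) ... */

            irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
    }
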
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
setup.c
130 err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
139 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
params.h
23 struct mlx5e_cq_param cqp; member in struct:mlx5e_rq_param
31 struct mlx5e_cq_param cqp; member in struct:mlx5e_sq_param
params.c
1027 mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
1084 mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
1268 mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1285 mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
1300 mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
trap.c
79 err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
ptp.c
567 cq_param = &cparams->txq_sq_param.cqp;
616 cq_param = &cparams->rq_param.cqp;
644 mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
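
Note the collision in meaning: in these mlx5 hits, cqp is not a Control QP at all but a struct mlx5e_cq_param member ("CQ param"), embedded once per queue type (params.h lines 23 and 31), filled by the mlx5e_build_*_cq_param() helpers, and consumed by mlx5e_open_cq(). A compact model of that build-then-open parameter flow, with the types reduced to the bare shape:

    #include <stdio.h>

    /* Reduced shapes: each queue's parameter block embeds its own CQ
     * parameter block, mirroring mlx5e_rq_param/mlx5e_sq_param. */
    struct cq_param { int moderation_usec; };
    struct rq_param { struct cq_param cqp; int wq_size; };

    /* Analogue of mlx5e_build_rx_cq_param(): fill the embedded cqp. */
    static void build_rx_cq_param(int usec, struct cq_param *param)
    {
            param->moderation_usec = usec;
    }

    /* Analogue of mlx5e_open_cq(): consume the prepared CQ param. */
    static int open_cq(const struct cq_param *param)
    {
            printf("opening CQ, moderation %d us\n", param->moderation_usec);
            return 0;
    }

    int main(void)
    {
            struct rq_param rq = { .wq_size = 1024 };

            build_rx_cq_param(8, &rq.cqp);
            return open_cq(&rq.cqp);
    }
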
/linux-master/drivers/scsi/lpfc/
lpfc_sli4.h
1082 int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
1092 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
lpfc_sli.c
16390 * @cqp: The queue structure array to use to create the completion queues.
16396 * as detailed in @cqp, on a port,
16412 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, argument
16429 if (!cqp || !hdwq || !numcq)
16437 length += ((numcq * cqp[0]->page_count) *
16455 cq = cqp[idx];
16629 cq = cqp[idx];
17410 * @cqp: The completion queue array to bind these receive queues to.
17431 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17446 if (!hrqp || !drqp || !cqp || !numr
17430 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, struct lpfc_queue **drqp, struct lpfc_queue **cqp, uint32_t subtype) argument
[all...]
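
The lpfc hits use cqp for yet another shape: an array of struct lpfc_queue pointers, so lpfc_cq_create_set() can create a whole set of completion queues in one pass (note the !cqp || !hdwq || !numcq argument check at line 16429). A minimal model of the batch-create-over-an-array pattern, with all names invented:

    #include <stdio.h>

    struct queue { int qid; };

    /* Analogue of lpfc_cq_create_set(): validate the array, then
     * create every CQ in the set. */
    static int cq_create_set(struct queue **cqp, int numcq)
    {
            if (!cqp || !numcq)
                    return -1;

            for (int idx = 0; idx < numcq; idx++) {
                    struct queue *cq = cqp[idx];

                    if (!cq)
                            return -1;
                    printf("created CQ %d\n", cq->qid);
            }
            return 0;
    }

    int main(void)
    {
            struct queue cq0 = { .qid = 100 }, cq1 = { .qid = 101 };
            struct queue *set[] = { &cq0, &cq1 };

            return cq_create_set(set, 2);
    }
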
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
en_main.c
2155 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp,
2360 err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
2365 err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
2374 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
2379 err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
2384 err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,

Completed in 312 milliseconds