Searched refs:qp (Results 201 - 225 of 380) sorted by relevance


/linux-master/net/9p/
trans_rdma.c 51 * @qp: Queue Pair pointer
78 struct ib_qp *qp; member in struct:p9_trans_rdma
367 if (rdma->qp && !IS_ERR(rdma->qp))
368 ib_destroy_qp(rdma->qp);
407 ret = ib_post_recv(rdma->qp, &wr, NULL);
516 err = ib_post_send(rdma->qp, &wr, NULL);
723 rdma->qp = rdma->cm_id->qp;
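
The trans_rdma.c hits above show the core verbs send/receive pattern: receive buffers are posted with ib_post_recv() (line 407), outgoing work with ib_post_send() (line 516), both against the QP taken from the CM ID (line 723). A minimal kernel-style sketch of posting a single-SGE receive; post_one_recv and its parameters are illustrative, not from the driver:

    #include <rdma/ib_verbs.h>

    /* Post one DMA-mapped receive buffer on a QP (error handling trimmed). */
    static int post_one_recv(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
    {
        struct ib_sge sge = {
            .addr   = dma_addr,
            .length = len,
            .lkey   = lkey,    /* local key covering the buffer */
        };
        struct ib_recv_wr wr = {
            .sg_list = &sge,
            .num_sge = 1,      /* real code also sets wr_cqe for completion routing */
        };

        return ib_post_recv(qp, &wr, NULL);
    }
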
/linux-master/drivers/infiniband/sw/siw/
siw_verbs.h 53 int siw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attr,
85 void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
/linux-master/drivers/infiniband/sw/rdmavt/
mr.c 400 * @qp: the qp
407 static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v) argument
412 if (mr->pd != qp->ibqp.pd)
414 rvt_qp_mr_clean(qp, mr->lkey);
607 * @qp: the queue pair where the work request comes from
614 int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key, argument
619 if (qp->ibqp.pd != mr->mr.pd)
642 * @qp: queue pair associated with the invalidate op
647 int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey) argument
826 rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) argument
[all...]
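
Both rvt hits (mr.c:614/619 and 642/647) gate the memory-key operation on the QP and the region sharing a protection domain. A hedged sketch of that check; pd_matches is an illustrative name:

    #include <rdma/rdmavt_qp.h>
    #include <rdma/rdmavt_mr.h>

    /* A QP may only fast-register or invalidate a region in its own PD. */
    static bool pd_matches(struct rvt_qp *qp, struct rvt_mregion *mr)
    {
        return qp->ibqp.pd == mr->pd;
    }
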
/linux-master/drivers/infiniband/hw/mlx5/
restrack.c 161 struct mlx5_ib_qp *qp = to_mqp(ibqp); local
164 if (qp->type < IB_QPT_DRIVER)
167 switch (qp->type) {
mlx5_ib.h 18 #include <linux/mlx5/qp.h>
28 #include "qp.h"
512 /* serialize qp state modifications
747 struct ib_qp *qp; member in struct:umr_common
1297 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1298 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1299 int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
1305 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
1306 void mlx5_ib_drain_sq(struct ib_qp *qp);
1307 void mlx5_ib_drain_rq(struct ib_qp *qp);
[all...]
odp.c 41 #include "qp.h"
1056 struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
1062 u32 qpn = qp->trans_qp.base.mqp.qpn;
1083 if (qp->type == IB_QPT_XRC_INI)
1086 if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
1132 struct mlx5_ib_qp *qp,
1136 struct mlx5_ib_wq *wq = &qp->rq;
1139 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
1202 struct mlx5_ib_qp *qp; local
1054 mlx5_ib_mr_initiator_pfault_handler( struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length) argument
1131 mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, void *wqe, void **wqe_end, int wqe_length) argument
[all...]
/linux-master/drivers/infiniband/ulp/isert/
ib_isert.c 145 return cma_id->qp;
411 ib_destroy_qp(isert_conn->qp);
455 isert_conn->qp = isert_create_qp(isert_conn, cma_id);
456 if (IS_ERR(isert_conn->qp)) {
457 ret = PTR_ERR(isert_conn->qp);
501 if (isert_conn->qp)
518 struct isert_conn *isert_conn = cma_id->qp->qp_context;
632 struct isert_conn *isert_conn = cma_id->qp->qp_context;
640 ib_drain_qp(isert_conn->qp);
659 struct isert_conn *isert_conn = cma_id->qp->qp_context;
[all...]
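
The isert hits at 518 and 632 recover the per-connection state from cma_id->qp->qp_context, the opaque pointer the driver registered when it created the QP. A minimal sketch of that round trip; conn_from_cma_id is an illustrative name:

    #include <rdma/rdma_cm.h>

    /* The pointer supplied in ib_qp_init_attr.qp_context at QP creation
     * comes back out of the QP in later CM event callbacks. */
    static void *conn_from_cma_id(struct rdma_cm_id *cma_id)
    {
        return cma_id->qp->qp_context;
    }
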
/linux-master/drivers/scsi/
qlogicpti.h 504 #define for_each_qlogicpti(qp) \
505 for((qp) = qptichain; (qp); (qp) = (qp)->next)
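
for_each_qlogicpti() is a plain for-loop macro over a driver-global singly linked list. A standalone, runnable illustration of the same shape, with made-up names (struct dev, devchain):

    #include <stdio.h>

    struct dev { int id; struct dev *next; };
    static struct dev *devchain;

    /* Same shape as for_each_qlogicpti(): iterate a global chain. */
    #define for_each_dev(dp) \
        for ((dp) = devchain; (dp); (dp) = (dp)->next)

    int main(void)
    {
        struct dev c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct dev *dp;

        devchain = &a;
        for_each_dev(dp)
            printf("dev %d\n", dp->id);
        return 0;
    }
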
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_main.c 286 struct pvrdma_qp *qp; local
290 qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
291 if (qp)
292 refcount_inc(&qp->refcnt);
295 if (qp && qp->ibqp.event_handler) {
296 struct ib_qp *ibqp = &qp->ibqp;
300 e.element.qp = ibqp;
304 if (qp) {
305 if (refcount_dec_and_test(&qp->refcnt))
[all...]
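
The pvrdma_main.c hits show the usual event-dispatch lifetime dance: look the QP up by number, take a reference before calling the user's event handler, then drop it and free on the last put. A hedged kernel-style sketch; qp_from_table() and qp_release() stand in for code outside the excerpt:

    #include <linux/refcount.h>
    #include <rdma/ib_verbs.h>

    static void dispatch_qp_event(struct pvrdma_dev *dev, u32 qpn,
                                  enum ib_event_type type)
    {
        struct pvrdma_qp *qp = qp_from_table(dev, qpn); /* hypothetical; does refcount_inc() under lock */

        if (!qp)
            return;

        if (qp->ibqp.event_handler) {
            struct ib_event e = {
                .device     = qp->ibqp.device,
                .element.qp = &qp->ibqp,
                .event      = type,
            };
            qp->ibqp.event_handler(&e, qp->ibqp.qp_context);
        }

        if (refcount_dec_and_test(&qp->refcnt))
            qp_release(qp); /* hypothetical: complete/free on last reference */
    }
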
/linux-master/drivers/infiniband/ulp/ipoib/
ipoib_ib.c 111 ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
239 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
355 ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
362 __func__, priv->qp->qp_num, qp_attr.qp_state);
368 ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
371 ret, priv->qp->qp_num);
375 __func__, priv->qp->qp_num);
378 priv->qp->qp_num, qp_attr.qp_state);
587 return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
760 struct ib_qp *qp,
759 check_qp_movement_and_print(struct ipoib_dev_priv *priv, struct ib_qp *qp, enum ib_qp_state new_state) argument
[all...]
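
ipoib_ib.c:355-378 reads the QP's current state with ib_query_qp() and, when it is not already where it should be, requests a transition with ib_modify_qp(). A minimal sketch of that query-then-modify flow; move_qp_to_state is an illustrative name:

    #include <rdma/ib_verbs.h>

    static int move_qp_to_state(struct ib_qp *qp, enum ib_qp_state new_state)
    {
        struct ib_qp_attr attr;
        struct ib_qp_init_attr init_attr;
        int ret;

        ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
        if (ret)
            return ret;
        if (attr.qp_state == new_state)
            return 0;   /* already there, nothing to do */

        attr.qp_state = new_state;
        return ib_modify_qp(qp, &attr, IB_QP_STATE);
    }
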
/linux-master/drivers/infiniband/hw/cxgb4/
cm.c 165 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
174 c4iw_qp_add_ref(&ep->com.qp->ibqp);
704 if (!ep->com.qp || !ep->com.qp->srq) {
1643 err = c4iw_modify_qp(ep->com.qp->rhp,
1644 ep->com.qp, mask, &attrs, 1);
1658 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1677 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
3123 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); local
[all...]
resource.c 43 rdev->lldi.vr->qp.start,
44 rdev->lldi.vr->qp.size,
45 rdev->lldi.vr->qp.size, 0))
48 for (i = rdev->lldi.vr->qp.start;
49 i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
137 * now put the same ids on the qp list since they all
/linux-master/drivers/infiniband/hw/hns/
hns_roce_qp.c 81 void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp) argument
92 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
93 init_flush_work(dev, qp);
99 struct hns_roce_qp *qp; local
102 qp = __hns_roce_qp_lookup(hr_dev, qpn);
103 if (qp)
104 refcount_inc(&qp->refcount);
107 if (!qp) {
117 qp->state = IB_QPS_ERR;
119 flush_cqe(hr_dev, qp);
1271 hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) argument
[all...]
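
flush_cqe() at hns_roce_qp.c:81-93 uses test_and_set_bit() as a run-once gate: only the caller that flips the flag from 0 to 1 queues the flush work, so concurrent error paths cannot queue it twice. A sketch of the gate; FLUSH_FLAG and the work item are illustrative:

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    #define FLUSH_FLAG 0

    /* First caller wins; everyone else sees the bit already set and skips. */
    static void queue_flush_once(unsigned long *flags, struct work_struct *flush_work)
    {
        if (!test_and_set_bit(FLUSH_FLAG, flags))
            schedule_work(flush_work);
    }
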
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c 197 (dr_cq->qp->sq.wqe_cnt - 1);
198 dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
200 ++dr_cq->qp->sq.cc;
203 (dr_cq->qp->sq.wqe_cnt - 1);
204 dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
555 send_ring->qp->qpn);
647 dr_post_send(send_ring->qp, send_info);
996 struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
[all...]
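
dr_send.c indexes the send queue with wqe_head[counter & (wqe_cnt - 1)]: the ring size is kept a power of two so the AND is a cheap modulo. A standalone, runnable illustration with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t wqe_cnt = 256;   /* ring size, must be a power of two */
        uint32_t wqe_counter = 1027;    /* e.g. taken from a CQE */
        uint32_t idx = wqe_counter & (wqe_cnt - 1);

        printf("wqe %u -> ring slot %u\n", wqe_counter, idx); /* slot 3 */
        return 0;
    }
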
/linux-master/drivers/infiniband/hw/mlx4/
main.c 58 #include <linux/mlx4/qp.h>
532 props->max_qp = dev->dev->quotas.qp;
1428 static int __mlx4_ib_default_rules_match(struct ib_qp *qp, argument
1434 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1484 struct ib_qp *qp,
1521 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, argument
1529 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1550 ctrl->qpn = cpu_to_be32(qp->qp_num);
1555 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1558 mdev, qp, default_tabl
1482 __mlx4_ib_create_default_rules( struct mlx4_ib_dev *mdev, struct ib_qp *qp, const struct default_rules *pdefault_rules, struct _rule_hw *mlx4_spec) argument
1613 mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, u64 *reg_id) argument
1688 mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, struct ib_udata *udata) argument
1898 find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) argument
2280 struct mlx4_ib_qp *qp; local
[all...]
/linux-master/fs/smb/server/
transport_rdma.c 107 struct ib_qp *qp; member in struct:smb_direct_transport
426 if (t->qp) {
427 ib_drain_qp(t->qp);
428 ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
429 ib_destroy_qp(t->qp);
558 ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
660 ret = ib_post_recv(t->qp, &wr, NULL);
908 ret = ib_post_send(t->qp, wr, NULL);
1320 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp,
[all...]
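
The teardown at transport_rdma.c:426-429 is order-sensitive: drain the QP so no completions race the frees, destroy the MR pool that was allocated against the QP, then destroy the QP itself. A sketch using the same in-tree helpers:

    #include <rdma/ib_verbs.h>
    #include <rdma/mr_pool.h>

    static void teardown_qp(struct ib_qp *qp)
    {
        ib_drain_qp(qp);                       /* wait out in-flight work requests */
        ib_mr_pool_destroy(qp, &qp->rdma_mrs); /* MRs are tied to this QP */
        ib_destroy_qp(qp);
    }
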
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_mqd_manager_vi.c 294 struct queue_properties *qp,
311 qp->doorbell_off <<
316 qp->is_active = 0;
399 struct queue_properties *qp,
412 qp->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
418 qp->is_active = 0;
292 restore_mqd(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, const void *mqd_src, const void *ctl_stack_src, const u32 ctl_stack_size) argument
397 restore_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, const void *mqd_src, const void *ctl_stack_src, const u32 ctl_stack_size) argument
kfd_mqd_manager_v10.c 283 struct queue_properties *qp,
300 qp->doorbell_off <<
305 qp->is_active = 0;
402 struct queue_properties *qp,
416 qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
422 qp->is_active = 0;
281 restore_mqd(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, const void *mqd_src, const void *ctl_stack_src, const u32 ctl_stack_size) argument
400 restore_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, const void *mqd_src, const void *ctl_stack_src, const u32 ctl_stack_size) argument
/linux-master/include/rdma/
ib_verbs.h 344 /* Corresponding bit will be set if qp type from
758 struct ib_qp *qp; member in union:ib_event::__anon163
1023 struct ib_qp *qp; member in struct:ib_wc
1129 /* Reserve a range for qp types internal to the low level driver.
1130 * These qp types will not be visible at the IB core layer, so the
1761 struct ib_qp *qp; member in struct:ib_qp_security
1818 /* The counter the qp is bind to */
2094 struct ib_qp *qp; member in struct:ib_flow
2333 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2335 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr);
3792 ib_destroy_qp(struct ib_qp *qp) argument
3829 ib_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr, const struct ib_send_wr **bad_send_wr) argument
3846 ib_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr) argument
[all...]
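
The post_send/post_recv members at ib_verbs.h:2333-2335 are per-device function pointers; the ib_post_send()/ib_post_recv() helpers at 3829/3846 are thin inlines that dispatch through them. A simplified sketch of that dispatch shape (not reproduced verbatim from the tree; the real wrapper likewise substitutes a dummy pointer when the caller passes a NULL bad_send_wr):

    /* Simplified shape of the ib_post_send() wrapper. */
    static inline int my_post_send(struct ib_qp *qp,
                                   const struct ib_send_wr *send_wr,
                                   const struct ib_send_wr **bad_send_wr)
    {
        const struct ib_send_wr *dummy;

        return qp->device->ops.post_send(qp, send_wr,
                                         bad_send_wr ? : &dummy);
    }
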
/linux-master/drivers/media/pci/solo6x10/
solo6x10-enc.c 173 unsigned int qp)
178 if ((ch > 31) || (qp > 3))
197 solo_dev->jpeg_qp[idx] |= (qp & 3) << ch;
172 solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch, unsigned int qp) argument
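
Here qp is a 2-bit JPEG quantization selector packed per channel into 32-bit words (note the ch > 31 / qp > 3 bounds check at line 178). The excerpt does not show how idx and the shift are derived, so the layout below is an assumption: a standalone, runnable illustration packing 16 two-bit values per word:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t jpeg_qp[2];  /* 32 channels x 2 bits = two u32 words */

    static int set_jpeg_qp(unsigned ch, unsigned qp)
    {
        if (ch > 31 || qp > 3)
            return -1;
        unsigned idx = ch / 16;          /* assumed word layout */
        unsigned shift = (ch % 16) * 2;  /* assumed per-channel shift */
        jpeg_qp[idx] &= ~(3u << shift);  /* clear the channel's two bits */
        jpeg_qp[idx] |= (uint32_t)qp << shift;
        return 0;
    }

    int main(void)
    {
        set_jpeg_qp(5, 2);
        printf("word0 = 0x%08x\n", jpeg_qp[0]); /* bits 10-11 hold 0b10 */
        return 0;
    }
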
/linux-master/drivers/infiniband/hw/hfi1/
fault.c 307 bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode) argument
309 struct hfi1_ibdev *ibd = to_idev(qp->ibqp.device);
312 trace_hfi1_fault_opcode(qp, opcode);
ipoib.h 127 struct rvt_qp *qp; member in struct:hfi1_ipoib_dev_priv
/linux-master/drivers/infiniband/hw/mthca/
mthca_eq.c 144 } __packed qp; member in union:mthca_eqe::__anon201
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
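
Every branch of the mthca event switch converts the big-endian QPN field and masks it to 24 bits, the width of an InfiniBand QP number. A standalone, runnable illustration, with ntohl() standing in for the kernel's be32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* InfiniBand QPNs are 24-bit; drop the top byte of the EQE field. */
    static uint32_t qpn_from_eqe(uint32_t be_qpn)
    {
        return ntohl(be_qpn) & 0xffffff;
    }

    int main(void)
    {
        printf("qpn = 0x%06x\n", qpn_from_eqe(htonl(0xab123456))); /* 0x123456 */
        return 0;
    }
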
/linux-master/drivers/scsi/lpfc/
lpfc_debugfs.h 524 struct lpfc_queue *qp; local
526 qp = phba->sli4_hba.hdwq[qidx].hba_eq;
528 pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
530 lpfc_debug_dump_q(qp);
/linux-master/drivers/infiniband/core/
mad.c 358 if (!port_priv->qp_info[qpn].qp) {
388 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
545 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, argument
555 wc->qp = qp;
688 build_smp_wc(mad_agent_priv->agent.qp,
880 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
889 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1034 ret = ib_post_send(mad_agent->qp,
2782 struct ib_qp *qp; local
[all...]

Completed in 235 milliseconds
