Searched refs:qp (Results 301 - 325 of 380) sorted by relevance

<< 11 12 13 14 15 16

/linux-master/drivers/infiniband/ulp/rtrs/
H A Drtrs-clt.c331 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
351 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
377 return ib_post_send(con->c.qp, &wr, NULL);
600 return ib_post_recv(con->qp, wr, NULL);
605 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
1681 * in case qp gets into error state.
1761 if (con->c.qp)
1762 ib_drain_qp(con->c.qp);
2385 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2469 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
[all...]
/linux-master/drivers/media/pci/tw5864/
H A Dtw5864-video.c244 input->reg_dsp_qp = input->qp;
245 input->reg_dsp_ref_mvp_lambda = lambda_lookup_table[input->qp];
246 input->reg_dsp_i4x4_weight = intra4x4_lambda3[input->qp];
508 input->qp = ctrl->val;
509 input->reg_dsp_qp = input->qp;
510 input->reg_dsp_ref_mvp_lambda = lambda_lookup_table[input->qp];
511 input->reg_dsp_i4x4_weight = intra4x4_lambda3[input->qp];
1159 input->qp = QP_VALUE;
1254 tw5864_h264_put_stream_header(&dst, &dst_space, input->qp,
/linux-master/drivers/media/pci/solo6x10/
H A Dsolo6x10-v4l2-enc.c252 solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp);
257 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp);
1075 solo_enc->qp = ctrl->val;
1076 solo_reg_write(solo_dev, SOLO_VE_CH_QP(solo_enc->ch), solo_enc->qp);
1077 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(solo_enc->ch), solo_enc->qp);
1258 solo_enc->qp = SOLO_DEFAULT_QP;
/linux-master/drivers/scsi/sym53c8xx_2/
H A Dsym_glue.c577 SYM_QUEHEAD *qp; local
592 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
593 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
629 SYM_QUEHEAD *qp; local
645 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
646 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb,
/linux-master/drivers/infiniband/hw/cxgb4/
H A Dprovider.c280 props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
292 props->max_cq = dev->rdev.lldi.vr->qp.size;
H A Dqp.c893 void c4iw_qp_add_ref(struct ib_qp *qp) argument
895 pr_debug("ib_qp %p\n", qp);
896 refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
899 void c4iw_qp_rem_ref(struct ib_qp *qp) argument
901 pr_debug("ib_qp %p\n", qp);
902 if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
903 complete(&to_c4iw_qp(qp)->qp_rel_comp);
1097 * If the qp has been flushed, then just insert a special
1276 * If the qp has been flushed, then just insert a special
1605 /* locking hierarchy: cqs lock first, then qp lock. */
2109 c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, struct ib_udata *udata) argument
[all...]
/linux-master/drivers/infiniband/hw/mthca/
H A Dmthca_mad.c91 new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
/linux-master/drivers/net/ethernet/ibm/ehea/
H A Dehea.h350 struct ehea_qp *qp; member in struct:ehea_port_res
/linux-master/drivers/scsi/
H A Dncr53c8xx.c4389 struct list_head *qp; local
4396 qp = ncr_list_pop(&lp->wait_ccbq);
4397 if (!qp)
4400 cp = list_entry(qp, struct ccb, link_ccbq);
4401 list_add_tail(qp, &lp->busy_ccbq);
6371 struct list_head *qp; local
6385 qp = lp->busy_ccbq.prev;
6386 while (qp != &lp->busy_ccbq) {
6387 cp2 = list_entry(qp, struct ccb, link_ccbq);
6388 qp
6982 struct list_head *qp; local
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx4/
H A Dmlx4.h627 __be32 qp[MLX4_MAX_QP_PER_MGM]; member in struct:mlx4_mgm
1332 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1334 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1337 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
/linux-master/drivers/scsi/qla2xxx/
H A Dqla_nvme.c16 struct qla_qpair *qp,
1129 qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp, argument
1134 lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
1164 qla2x00_start_iocbs(vha, qp->req);
/linux-master/drivers/infiniband/core/
H A Duverbs_main.c178 void ib_uverbs_detach_umcast(struct ib_qp *qp, argument
184 ib_detach_mcast(qp, &mcast->gid, mcast->lid);
454 /* for XRC target qp's, check that qp is live */
455 if (!event->element.qp->uobject)
458 uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
/linux-master/drivers/infiniband/hw/irdma/
H A Dcm.h413 void irdma_lpb_nop(struct irdma_sc_qp *qp);
/linux-master/drivers/infiniband/ulp/iser/
H A Discsi_iser.h359 * @qp: Connection Queue-pair
369 struct ib_qp *qp; member in struct:ib_conn
H A Diser_initiator.c535 struct ib_conn *ib_conn = wc->qp->qp_context;
636 struct ib_conn *ib_conn = wc->qp->qp_context;
699 struct ib_conn *ib_conn = wc->qp->qp_context;
/linux-master/drivers/infiniband/hw/mana/
H A Dmana_ib.h296 int mana_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
/linux-master/net/rds/
H A Dib_send.c302 /* We expect errors as the qp is drained during shutdown */
737 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
831 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
987 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
/linux-master/net/smc/
H A Dsmc_wr.c83 link = wc->qp->qp_context;
432 struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
455 link = wc[i].qp->qp_context;
/linux-master/fs/smb/client/
H A Dsmbdirect.c468 wc->qp->device,
736 rc = ib_post_send(info->id->qp, &send_wr, NULL);
818 rc = ib_post_send(info->id->qp, &send_wr, NULL);
1029 rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
1308 log_rdma_event(INFO, "destroying qp\n");
1309 ib_drain_qp(info->id->qp);
2316 rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
2376 rc = ib_post_send(info->id->qp, wr, NULL);
/linux-master/drivers/infiniband/hw/mlx5/
H A Ddevx.c18 #include "qp.h"
632 struct mlx5_ib_qp *qp = to_mqp(uobj->object); local
634 if (qp->type == IB_QPT_RAW_PACKET ||
635 (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
637 &qp->raw_packet_qp;
651 if (qp->type == MLX5_IB_QPT_DCT)
653 qp->dct.mdct.mqp.qpn) == obj_id;
655 qp->ibqp.qp_num) == obj_id;
/linux-master/drivers/soc/fsl/qbman/
H A Dqman.c2944 static int qpool_cleanup(u32 qp) argument
2973 if (qm_fqd_get_chan(&fqd) == qp) {
2989 int qman_release_pool(u32 qp) argument
2993 ret = qpool_cleanup(qp);
2995 pr_debug("CHID %d leaked\n", qp);
2999 gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
H A Dpvrdma.h538 void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);
/linux-master/drivers/scsi/bnx2i/
H A Dbnx2i.h740 * @qp: QP information
760 struct qp_info qp; member in struct:bnx2i_endpoint
/linux-master/drivers/infiniband/hw/mlx4/
H A Dsrq.c34 #include <linux/mlx4/qp.h>
/linux-master/drivers/net/ethernet/chelsio/cxgb4/
H A Dcxgb4_uld.h363 struct cxgb4_range qp; member in struct:cxgb4_virt_res

Completed in 560 milliseconds

<< 11 12 13 14 15 16