Searched refs:qp (Results 226 - 250 of 380) sorted by relevance

/linux-master/drivers/infiniband/hw/erdma/
erdma_cm.h 99 struct erdma_qp *qp; member in struct:erdma_cep
/linux-master/drivers/infiniband/sw/siw/
siw_cq.c 71 wc->qp = cqe->base_qp;
/linux-master/drivers/scsi/sym53c8xx_2/
sym_hipd.c 1529 SYM_QUEHEAD *qp; local
1543 qp = sym_remque_head(&lp->waiting_ccbq);
1544 if (!qp)
1546 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
1550 sym_insque_head(qp, &lp->waiting_ccbq);
1559 sym_insque_head(qp, &lp->waiting_ccbq);
1568 sym_insque_tail(qp, &lp->started_ccbq);
1630 SYM_QUEHEAD *qp; local
1633 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
1635 cp = sym_que_entry(qp, struc
1934 SYM_QUEHEAD *qp; local
3188 SYM_QUEHEAD qtmp, *qp; local
3271 SYM_QUEHEAD *qp; local
4651 SYM_QUEHEAD *qp; local
5323 SYM_QUEHEAD *qp; local
5802 SYM_QUEHEAD *qp; local
[all...]
/linux-master/drivers/scsi/lpfc/
lpfc_init.c 1010 struct lpfc_queue *qp = NULL; local
1035 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1036 pring = qp->pring;
1088 struct lpfc_sli4_hdw_queue *qp; local
1126 qp = &phba->sli4_hba.hdwq[idx];
1128 spin_lock(&qp->abts_io_buf_list_lock);
1129 list_splice_init(&qp->lpfc_abts_io_buf_list,
1137 spin_lock(&qp->io_buf_list_put_lock);
1138 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1139 qp
3436 struct lpfc_sli4_hdw_queue *qp; local
3471 struct lpfc_sli4_hdw_queue *qp; local
3510 struct lpfc_sli4_hdw_queue *qp; local
3596 struct lpfc_sli4_hdw_queue *qp; local
4001 struct lpfc_sli4_hdw_queue *qp; local
4288 struct lpfc_sli4_hdw_queue *qp; local
4349 struct lpfc_sli4_hdw_queue *qp; local
10435 struct lpfc_sli4_hdw_queue *qp; local
10800 __lpfc_sli4_release_queue(struct lpfc_queue **qp) argument
11069 struct lpfc_sli4_hdw_queue *qp; local
11426 struct lpfc_sli4_hdw_queue *qp; local
13348 struct lpfc_sli4_hdw_queue *qp; local
[all...]
lpfc_nvme.c 2040 struct lpfc_sli4_hdw_queue *qp; local
2078 qp = &phba->sli4_hba.hdwq[idx];
2079 qp->empty_io_bufs++;
2098 struct lpfc_sli4_hdw_queue *qp; local
2107 qp = lpfc_ncmd->hdwq;
2115 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2117 &qp->lpfc_abts_io_buf_list);
2118 qp->abts_nvme_io_bufs++;
2119 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2121 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2232 struct lpfc_sli4_hdw_queue *qp; local
[all...]
/linux-master/drivers/nvme/target/
rdma.c 89 struct ib_qp *qp; member in struct:nvmet_rdma_queue
506 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
634 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
639 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
652 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
657 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
697 struct nvmet_rdma_queue *queue = wc->qp->qp_context;
725 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
728 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
740 if (unlikely(ib_post_send(cm_id->qp, first_w
[all...]
/linux-master/drivers/net/ethernet/fungible/funeth/
funeth_txrx.h 256 struct funeth_txq **qp);
261 int state, struct funeth_rxq **qp);
/linux-master/drivers/net/ethernet/sun/
sunqe.c 938 struct sunqe *qp = platform_get_drvdata(op); local
939 struct net_device *net_dev = qp->dev;
943 of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
944 of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
946 qp->qe_block, qp->qblock_dvma);
948 qp->buffers, qp->buffers_dvma);
/linux-master/drivers/infiniband/hw/mthca/
mthca_dev.h 258 struct mthca_array qp; member in struct:mthca_qp_table
531 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
540 struct mthca_qp *qp,
550 struct mthca_qp *qp,
552 void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
/linux-master/include/linux/qed/
qed_rdma_if.h 505 struct qed_rdma_qp *qp; member in struct:qed_iwarp_connect_in
533 struct qed_rdma_qp *qp; member in struct:qed_iwarp_accept_in
619 int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
622 int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
624 int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
/linux-master/include/linux/
hisi_acc_qm.h 429 void (*req_cb)(struct hisi_qp *qp, void *data);
430 void (*event_cb)(struct hisi_qp *qp);
533 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
534 void hisi_qm_stop_qp(struct hisi_qp *qp);
535 int hisi_qp_send(struct hisi_qp *qp, const void *msg);
/linux-master/drivers/infiniband/sw/rxe/
rxe_mr.c 638 int rxe_invalidate_mr(struct rxe_qp *qp, u32 key) argument
640 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
647 rxe_dbg_qp(qp, "No MR for key %#x\n", key);
688 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe) argument
700 /* user can only register mr with qp in same protection domain */
701 if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
702 rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
/linux-master/drivers/crypto/hisilicon/sec2/
sec_crypto.c 144 req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
162 if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
233 static void sec_req_cb(struct hisi_qp *qp, void *resp) argument
235 struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
260 atomic_inc(&qp->qp_status.used);
283 atomic_read(&qp_ctx->qp->qp_status.used) &&
288 ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
290 atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
492 u16 q_depth = qp_ctx->qp->sq_depth;
548 struct hisi_qp *qp; local
[all...]
/linux-master/drivers/infiniband/hw/qib/
qib.h 249 struct rvt_qp *qp; member in struct:qib_verbs_txreq
1484 void qib_stop_send_queue(struct rvt_qp *qp);
1485 void qib_quiesce_qp(struct rvt_qp *qp);
1486 void qib_flush_qp_waiters(struct rvt_qp *qp);
1488 u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
1489 void qib_notify_error_qp(struct rvt_qp *qp);
1490 int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
/linux-master/drivers/infiniband/hw/mlx4/
mlx4_ib.h 50 #include <linux/mlx4/qp.h>
460 struct ib_qp *qp; member in struct:mlx4_ib_demux_pv_qp
487 struct mlx4_ib_demux_pv_qp qp[2]; member in struct:mlx4_ib_demux_pv_ctx
797 int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
799 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
800 void mlx4_ib_drain_sq(struct ib_qp *qp);
801 void mlx4_ib_drain_rq(struct ib_qp *qp);
/linux-master/net/rds/
ib_frmr.c 164 ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
273 if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
293 ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
411 /* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/
/linux-master/drivers/infiniband/ulp/srp/
ib_srp.c 271 struct ib_qp *qp)
292 ret = ib_modify_qp(qp, attr,
509 * Drain the qp before destroying it. This avoids that the receive
519 ib_drain_qp(ch->qp);
520 ib_destroy_qp(ch->qp);
530 struct ib_qp *qp; local
568 qp = ch->rdma_cm.cm_id->qp;
570 qp = ib_create_qp(dev->pd, init_attr);
571 if (!IS_ERR(qp)) {
270 srp_init_ib_qp(struct srp_target_port *target, struct ib_qp *qp) argument
[all...]
/linux-master/drivers/infiniband/core/
agent.c 102 ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
mad_priv.h 188 struct ib_qp *qp; member in struct:ib_mad_qp_info
/linux-master/include/uapi/rdma/
vmw_pvrdma-abi.h 292 __aligned_u64 qp; member in struct:pvrdma_cqe
/linux-master/drivers/infiniband/hw/qedr/
verbs.h 58 int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
/linux-master/drivers/infiniband/ulp/isert/
ib_isert.h 182 struct ib_qp *qp; member in struct:isert_conn
/linux-master/drivers/infiniband/ulp/srpt/
ib_srpt.h 253 * @qp: IB queue pair used for communicating over this channel.
294 struct ib_qp *qp; member in struct:srpt_rdma_ch
/linux-master/net/sunrpc/xprtrdma/
frwr_ops.c 416 ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
541 * unless re_id->qp is a valid pointer.
544 rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
642 * unless re_id->qp is a valid pointer.
644 rc = ib_post_send(ep->re_id->qp, first, NULL);
695 return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
/linux-master/include/rdma/
rdma_cm.h 115 struct ib_qp *qp; member in struct:rdma_cm_id
