Lines matching refs: lnk (net/smc/smc_ib.c)

47 static int smc_ib_modify_qp_init(struct smc_link *lnk)
54 qp_attr.port_num = lnk->ibport;
57 return ib_modify_qp(lnk->roce_qp, &qp_attr,
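
The matches above show only the port assignment and the final ib_modify_qp() call of the INIT transition. A minimal hedged sketch of what the complete helper plausibly looks like; the access flags and the attribute mask are assumptions, not copied from the file:

static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	/* local and remote write access are assumed here; the file
	 * defines the exact flag set
	 */
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				  IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}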
62 static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
72 qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
74 rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
75 if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
77 rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0);
78 rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
79 if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
80 memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac,
81 sizeof(lnk->lgr->nexthop_mac));
83 memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
84 sizeof(lnk->peer_mac));
85 qp_attr.dest_qp_num = lnk->peer_qpn;
86 qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
92 return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
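
The RTR matches omit the target QP state, the hop-limit selection behind hop_lim, and the attribute mask passed on line 92. A compact, hedged sketch of those missing pieces; the mask contents, the hop-limit value, and the RD-atomic/RNR settings are assumptions for illustration:

	qp_attr.qp_state = IB_QPS_RTR;
	/* routed SMC-Rv2 traffic (via a gateway) needs more than the
	 * link-local hop limit of 1; 64 here is only illustrative
	 */
	hop_lim = (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway) ?
		  64 : 1;
	qp_attr.max_dest_rd_atomic = 1;	/* assumed */
	qp_attr.min_rnr_timer = 5;	/* assumed */
	qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
		       IB_QP_DEST_QPN | IB_QP_RQ_PSN |
		       IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;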
95 int smc_ib_modify_qp_rts(struct smc_link *lnk)
104 qp_attr.sq_psn = lnk->psn_initial; /* starting send packet seq # */
108 return ib_modify_qp(lnk->roce_qp, &qp_attr,
114 int smc_ib_modify_qp_error(struct smc_link *lnk)
120 return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
123 int smc_ib_ready_link(struct smc_link *lnk)
125 struct smc_link_group *lgr = smc_get_lgr(lnk);
128 rc = smc_ib_modify_qp_init(lnk);
132 rc = smc_ib_modify_qp_rtr(lnk);
135 smc_wr_remember_qp_attr(lnk);
136 rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
140 rc = smc_wr_rx_post_init(lnk);
143 smc_wr_remember_qp_attr(lnk);
146 rc = smc_ib_modify_qp_rts(lnk);
149 smc_wr_remember_qp_attr(lnk);
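
The smc_ib_ready_link() matches show the ordering that brings a link up: INIT, then RTR, then arming the receive CQ and posting receive buffers before the final RTS transition. A condensed, hedged sketch of that flow; the error-label structure, the CQ notify flag, and any role-specific handling are assumptions:

int smc_ib_ready_link(struct smc_link *lnk)
{
	int rc;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	/* arm the receive CQ before peer traffic can arrive;
	 * the notify flag is illustrative, not copied from the file
	 */
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv, IB_CQ_NEXT_COMP);
	if (rc)
		goto out;

	/* receive WRs must be posted before the QP moves to RTS */
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	rc = smc_ib_modify_qp_rts(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
out:
	return rc;
}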
346 if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
347 lgr->lnk[i].smcibdev != smcibdev)
349 if (!smc_ib_check_link_gid(lgr->lnk[i].gid,
451 void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
453 if (lnk->roce_pd)
454 ib_dealloc_pd(lnk->roce_pd);
455 lnk->roce_pd = NULL;
458 int smc_ib_create_protection_domain(struct smc_link *lnk)
462 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
463 rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
464 if (IS_ERR(lnk->roce_pd))
465 lnk->roce_pd = NULL;
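
ib_alloc_pd() returns an ERR_PTR on failure, which is why the protection-domain setup converts it with PTR_ERR_OR_ZERO() and clears the pointer. A minimal hedged sketch of that pattern, matching the lines above:

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;	/* never leave an ERR_PTR behind */
	return rc;
}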
481 if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
482 lgr->lnk[i].smcibdev != smcibdev)
636 struct smc_link *lnk = (struct smc_link *)priv;
637 struct smc_ib_device *smcibdev = lnk->smcibdev;
655 void smc_ib_destroy_queue_pair(struct smc_link *lnk)
657 if (lnk->roce_qp)
658 ib_destroy_qp(lnk->roce_qp);
659 lnk->roce_qp = NULL;
663 int smc_ib_create_queue_pair(struct smc_link *lnk)
665 int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
668 .qp_context = lnk,
669 .send_cq = lnk->smcibdev->roce_cq_send,
670 .recv_cq = lnk->smcibdev->roce_cq_recv,
687 lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
688 rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
689 if (IS_ERR(lnk->roce_qp))
690 lnk->roce_qp = NULL;
692 smc_wr_remember_qp_attr(lnk);
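
The queue-pair matches show an ib_qp_init_attr initializer wired to the link's PD, its device's send/recv CQs, and a per-buffer SGE count that doubles for SMC_V2. A hedged sketch of the pattern; the capacity numbers, SGE limits, and the event-handler name are placeholders, not taken from the file:

int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler, /* assumed name */
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.cap = {
			.max_send_wr = 16,		/* placeholder sizing */
			.max_recv_wr = 16,		/* placeholder sizing */
			.max_send_sge = 2,		/* placeholder sizing */
			.max_recv_sge = sges_per_buf,	/* 2 SGEs per buffer on SMC_V2 */
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,	/* reliable connected QP for RoCE */
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}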
739 bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
747 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
748 buf_slot->sgt[lnk->link_idx].nents, i) {
751 if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
763 void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
770 if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
774 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
775 buf_slot->sgt[lnk->link_idx].nents, i) {
778 ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
786 void smc_ib_sync_sg_for_device(struct smc_link *lnk,
793 if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
797 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
798 buf_slot->sgt[lnk->link_idx].nents, i) {
801 ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
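
Both sync helpers follow the same shape: skip the link entirely if its bit in is_dma_need_sync is clear, otherwise walk the link's scatterlist and sync each mapped entry. A hedged sketch of the CPU-direction variant; the early-break condition is an assumption:

void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* nothing to do if this link's mapping never needs syncing */
	if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
		return;

	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;	/* assumed: stop at the first unmapped entry */
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}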
809 int smc_ib_buf_map_sg(struct smc_link *lnk,
815 mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
816 buf_slot->sgt[lnk->link_idx].sgl,
817 buf_slot->sgt[lnk->link_idx].orig_nents,
825 void smc_ib_buf_unmap_sg(struct smc_link *lnk,
829 if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
832 ib_dma_unmap_sg(lnk->smcibdev->ibdev,
833 buf_slot->sgt[lnk->link_idx].sgl,
834 buf_slot->sgt[lnk->link_idx].orig_nents,
836 buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
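
Mapping and unmapping are paired through the link-indexed scatter-gather table, and the unmap path uses a zeroed dma_address as its "not mapped" sentinel. A hedged sketch of both sides; the error convention on the map side is an assumption:

int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;	/* assumed error convention */
	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return;	/* already unmapped, or never mapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	/* reset the sentinel checked above */
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}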