Lines Matching refs: lnk (struct smc_link references in the Linux SMC core, net/smc/smc_core.c)

71 static void smc_ibdev_cnt_inc(struct smc_link *lnk)
73 atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
76 static void smc_ibdev_cnt_dec(struct smc_link *lnk)
78 atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
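
The two helpers above (lines 71-78) maintain a per-port link counter on the smcibdev; ports are numbered from 1, so the counter array is indexed with ibport - 1. Below is a minimal user-space sketch of that pattern; the port_* names and PORT_MAX are invented for illustration and are not the SMC code itself.

#include <stdatomic.h>

#define PORT_MAX 2                               /* assumption, not from SMC */

struct port_dev  { atomic_int lnk_cnt_by_port[PORT_MAX]; };
struct port_link { struct port_dev *dev; int ibport; /* 1-based, as above */ };

static void port_cnt_inc(struct port_link *lnk)
{
        /* port numbers start at 1, the counter array at index 0 */
        atomic_fetch_add(&lnk->dev->lnk_cnt_by_port[lnk->ibport - 1], 1);
}

static void port_cnt_dec(struct port_link *lnk)
{
        atomic_fetch_sub(&lnk->dev->lnk_cnt_by_port[lnk->ibport - 1], 1);
}
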
130 conn->lnk = NULL; /* reset conn->lnk first */
132 struct smc_link *lnk = &conn->lgr->lnk[i];
134 if (lnk->state != expected || lnk->link_is_asym)
137 conn->lnk = lnk; /* temporary, SMC server assigns link*/
144 lnk2 = &conn->lgr->lnk[j];
147 conn->lnk = lnk2;
152 if (!conn->lnk)
153 conn->lnk = lnk;
156 if (!conn->lnk)
158 atomic_inc(&conn->lnk->conn_cnt);
202 if (conn->lnk)
203 atomic_dec(&conn->lnk->conn_cnt);
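
Lines 130-158 assign a new connection to one of the link group's links: pick a link whose state matches the expected one, skip asymmetric links, and count the connection on the chosen link via conn_cnt; lines 202-203 drop that count when the connection goes away. A simplified sketch of the assign/unassign bookkeeping is below; the bal_* names and BAL_LINKS_MAX are invented, and the load-balancing refinements of the real function are left out.

#include <stdatomic.h>
#include <stddef.h>

#define BAL_LINKS_MAX 3                          /* assumption, not from SMC */

enum bal_state { BAL_UNUSED, BAL_ACTIVATING, BAL_ACTIVE };

struct bal_link { enum bal_state state; atomic_int conn_cnt; };
struct bal_lgr  { struct bal_link lnk[BAL_LINKS_MAX]; };
struct bal_conn { struct bal_lgr *lgr; struct bal_link *lnk; };

/* pick the first link in the expected state and account the connection on it */
static int bal_conn_assign_link(struct bal_conn *conn, enum bal_state expected)
{
        int i;

        conn->lnk = NULL;                        /* reset conn->lnk first */
        for (i = 0; i < BAL_LINKS_MAX; i++) {
                if (conn->lgr->lnk[i].state != expected)
                        continue;
                conn->lnk = &conn->lgr->lnk[i];
                break;
        }
        if (!conn->lnk)
                return -1;                       /* no usable link */
        atomic_fetch_add(&conn->lnk->conn_cnt, 1);
        return 0;
}

static void bal_conn_unassign_link(struct bal_conn *conn)
{
        if (conn->lnk)
                atomic_fetch_sub(&conn->lnk->conn_cnt, 1);
}
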
465 if (!smc_link_usable(&lgr->lnk[i]))
467 if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
666 struct smc_link *lnk = &lgr->lnk[i];
668 if (smc_link_sendable(lnk))
669 lnk->state = SMC_LNK_INACTIVE;
733 if (smc_link_usable(&lgr->lnk[i]) &&
734 lgr->lnk[i].link_id == link_id)
751 int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
759 lnk->smcibdev = ini->smcrv2.ib_dev_v2;
760 lnk->ibport = ini->smcrv2.ib_port_v2;
762 lnk->smcibdev = ini->ib_dev;
763 lnk->ibport = ini->ib_port;
765 get_device(&lnk->smcibdev->ibdev->dev);
766 atomic_inc(&lnk->smcibdev->lnk_cnt);
767 refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */
768 lnk->clearing = 0;
769 lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
770 lnk->link_id = smcr_next_link_id(lgr);
771 lnk->lgr = lgr;
773 lnk->link_idx = link_idx;
774 lnk->wr_rx_id_compl = 0;
775 smc_ibdev_cnt_inc(lnk);
776 smcr_copy_dev_info_to_link(lnk);
777 atomic_set(&lnk->conn_cnt, 0);
778 smc_llc_link_set_uid(lnk);
779 INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
780 if (!lnk->smcibdev->initialized) {
781 rc = smc_ib_setup_per_ibdev(lnk->smcibdev);
786 lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
788 rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
789 ini->vlan_id, lnk->gid, &lnk->sgid_index,
794 rc = smc_llc_link_init(lnk);
797 rc = smc_wr_alloc_link_mem(lnk);
800 rc = smc_ib_create_protection_domain(lnk);
803 rc = smc_ib_create_queue_pair(lnk);
806 rc = smc_wr_create_link(lnk);
809 lnk->state = SMC_LNK_ACTIVATING;
813 smc_ib_destroy_queue_pair(lnk);
815 smc_ib_dealloc_protection_domain(lnk);
817 smc_wr_free_link_mem(lnk);
819 smc_llc_link_clear(lnk, false);
821 smc_ibdev_cnt_dec(lnk);
822 put_device(&lnk->smcibdev->ibdev->dev);
823 smcibdev = lnk->smcibdev;
824 memset(lnk, 0, sizeof(struct smc_link));
825 lnk->state = SMC_LNK_UNUSED;
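
smcr_link_init() (lines 751-825) sets up a link step by step: device reference and counters, LLC state, work-request memory, protection domain, queue pair and work requests, finishing in SMC_LNK_ACTIVATING. On any failure it unwinds only the steps that already succeeded, in reverse order, then zeroes the slot and marks it SMC_LNK_UNUSED. The sketch below shows that goto-ladder unwinding shape with stand-in step_/undo_ functions; the names are placeholders, not SMC or RDMA APIs.

/* stand-ins for the real setup/teardown steps; they always succeed here */
static int  step_alloc_wr_mem(void) { return 0; }  /* cf. smc_wr_alloc_link_mem()           */
static int  step_create_pd(void)    { return 0; }  /* cf. smc_ib_create_protection_domain() */
static int  step_create_qp(void)    { return 0; }  /* cf. smc_ib_create_queue_pair()        */
static void undo_create_pd(void)    { }
static void undo_alloc_wr_mem(void) { }

static int sketch_link_init(void)
{
        int rc;

        rc = step_alloc_wr_mem();
        if (rc)
                goto out;
        rc = step_create_pd();
        if (rc)
                goto free_wr_mem;
        rc = step_create_qp();
        if (rc)
                goto dealloc_pd;
        return 0;                /* link ready; caller marks it ACTIVATING */

dealloc_pd:
        undo_create_pd();
free_wr_mem:
        undo_alloc_wr_mem();
out:
        return rc;               /* slot is then reset and marked UNUSED */
}
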
838 struct smc_link *lnk;
928 lnk = &lgr->lnk[link_idx];
929 rc = smcr_link_init(lgr, lnk, link_idx, ini);
934 lgr->net = smc_ib_net(lnk->smcibdev);
1021 smc_wr_tx_put_slot(conn->lnk,
1030 atomic_dec(&conn->lnk->conn_cnt);
1032 smcr_link_put(conn->lnk);
1033 conn->lnk = to_lnk;
1034 atomic_inc(&conn->lnk->conn_cnt);
1036 smcr_link_hold(conn->lnk);
1054 if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
1056 if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
1057 from_lnk->ibport == lgr->lnk[i].ibport) {
1060 to_lnk = &lgr->lnk[i];
1071 if (conn->lnk != from_lnk)
1074 /* conn->lnk not yet set in SMC_INIT state */
1226 smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */
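
When connections are moved off a failing link (lines 1021-1074), each connection drops its count and reference on the old link, is re-pointed at the surviving link, and takes the count and reference there; the put at line 1226 later releases the reference taken when the connection was created (per its comment). A small sketch of that switch-over bookkeeping follows, with invented sw_* types and the "free on last put" handling omitted (see the refcount sketch further down).

#include <stdatomic.h>

struct sw_link { atomic_int conn_cnt; atomic_int refcnt; };
struct sw_conn { struct sw_link *lnk; };

/* move one connection to to_lnk, keeping the per-link connection counter
 * and the link reference count consistent on both links */
static void sw_switch_link(struct sw_conn *conn, struct sw_link *to_lnk)
{
        atomic_fetch_sub(&conn->lnk->conn_cnt, 1);
        atomic_fetch_sub(&conn->lnk->refcnt, 1);   /* cf. smcr_link_put()  */
        conn->lnk = to_lnk;
        atomic_fetch_add(&conn->lnk->conn_cnt, 1);
        atomic_fetch_add(&conn->lnk->refcnt, 1);   /* cf. smcr_link_hold() */
}
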
1232 struct smc_link *lnk)
1235 buf_desc->is_reg_mr[lnk->link_idx] = false;
1236 if (!buf_desc->is_map_ib[lnk->link_idx])
1240 buf_desc->mr[lnk->link_idx]) {
1241 smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
1242 buf_desc->mr[lnk->link_idx] = NULL;
1245 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
1247 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
1249 sg_free_table(&buf_desc->sgt[lnk->link_idx]);
1250 buf_desc->is_map_ib[lnk->link_idx] = false;
1254 static void smcr_buf_unmap_lgr(struct smc_link *lnk)
1256 struct smc_link_group *lgr = lnk->lgr;
1263 smcr_buf_unmap_link(buf_desc, true, lnk);
1269 smcr_buf_unmap_link(buf_desc, false, lnk);
1274 static void smcr_rtoken_clear_link(struct smc_link *lnk)
1276 struct smc_link_group *lgr = lnk->lgr;
1280 lgr->rtokens[i][lnk->link_idx].rkey = 0;
1281 lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
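
Lines 1232-1281 show that buffer descriptors and the rtoken table keep their per-link state in parallel slots indexed by link_idx (is_reg_mr[], is_map_ib[], mr[], sgt[], rtokens[i][link_idx]); clearing a link resets only that link's column and leaves the other links' mappings alone. Below is a hedged sketch of the "one column per link" layout and its per-link teardown, with invented col_* types.

#include <stdbool.h>
#include <stddef.h>

#define COL_LINKS 3                              /* assumption, not from SMC */

struct col_buf {
        bool  is_mapped[COL_LINKS];              /* cf. is_map_ib[link_idx] */
        bool  is_registered[COL_LINKS];          /* cf. is_reg_mr[link_idx] */
        void *region[COL_LINKS];                 /* cf. mr[link_idx]        */
};

/* undo only the given link's column; other links keep their mappings */
static void col_buf_unmap_link(struct col_buf *buf, int link_idx)
{
        buf->is_registered[link_idx] = false;
        if (!buf->is_mapped[link_idx])
                return;                          /* nothing mapped for this link */
        buf->region[link_idx] = NULL;            /* the real code releases the MR here */
        buf->is_mapped[link_idx] = false;
}
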
1285 static void __smcr_link_clear(struct smc_link *lnk)
1287 struct smc_link_group *lgr = lnk->lgr;
1290 smc_wr_free_link_mem(lnk);
1291 smc_ibdev_cnt_dec(lnk);
1292 put_device(&lnk->smcibdev->ibdev->dev);
1293 smcibdev = lnk->smcibdev;
1294 memset(lnk, 0, sizeof(struct smc_link));
1295 lnk->state = SMC_LNK_UNUSED;
1302 void smcr_link_clear(struct smc_link *lnk, bool log)
1304 if (!lnk->lgr || lnk->clearing ||
1305 lnk->state == SMC_LNK_UNUSED)
1307 lnk->clearing = 1;
1308 lnk->peer_qpn = 0;
1309 smc_llc_link_clear(lnk, log);
1310 smcr_buf_unmap_lgr(lnk);
1311 smcr_rtoken_clear_link(lnk);
1312 smc_ib_modify_qp_error(lnk);
1313 smc_wr_free_link(lnk);
1314 smc_ib_destroy_queue_pair(lnk);
1315 smc_ib_dealloc_protection_domain(lnk);
1316 smcr_link_put(lnk); /* theoretically last link_put */
1319 void smcr_link_hold(struct smc_link *lnk)
1321 refcount_inc(&lnk->refcnt);
1324 void smcr_link_put(struct smc_link *lnk)
1326 if (refcount_dec_and_test(&lnk->refcnt))
1327 __smcr_link_clear(lnk);
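
Lines 1285-1327 carry the link's lifetime: smcr_link_hold() bumps refcnt, smcr_link_put() drops it, and only the last put runs __smcr_link_clear(), which frees the remaining work-request memory, releases the device, zeroes the slot and marks it SMC_LNK_UNUSED for reuse. smcr_link_clear() itself only flags the link as clearing and tears down the wire-facing state before issuing what is normally the final put. A user-space sketch of the same hold/put pattern, with the kernel's refcount_t replaced by a C11 atomic and invented ref_* names:

#include <stdatomic.h>
#include <string.h>

struct ref_link {
        atomic_int refcnt;
        int        state;
        /* ... per-link resources ... */
};

static void ref_link_hold(struct ref_link *lnk)
{
        atomic_fetch_add(&lnk->refcnt, 1);
}

/* runs exactly once, after the last reference is gone */
static void __ref_link_clear(struct ref_link *lnk)
{
        /* release remaining resources, then recycle the slot */
        memset(lnk, 0, sizeof(*lnk));
        lnk->state = 0;                          /* cf. SMC_LNK_UNUSED */
}

static void ref_link_put(struct ref_link *lnk)
{
        /* atomic_fetch_sub() returns the old value: 1 means this was the last put */
        if (atomic_fetch_sub(&lnk->refcnt, 1) == 1)
                __ref_link_clear(lnk);
}
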
1336 smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);
1417 if (lgr->lnk[i].state != SMC_LNK_UNUSED)
1418 smcr_link_clear(&lgr->lnk[i], false);
1613 if (lgr->lnk[i].smcibdev == smcibdev)
1614 smcr_link_down_cond_sched(&lgr->lnk[i]);
1643 if (smc_link_usable(&lgr->lnk[i]))
1644 lgr->lnk[i].link_is_asym = false;
1676 lgr->lnk[asym_lnk_idx].link_is_asym = true;
1722 static void smcr_link_down(struct smc_link *lnk)
1724 struct smc_link_group *lgr = lnk->lgr;
1728 if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
1731 to_lnk = smc_switch_conns(lgr, lnk, true);
1733 smcr_link_clear(lnk, true);
1737 del_link_id = lnk->link_id;
1756 smcr_link_clear(lnk, true);
1763 void smcr_link_down_cond(struct smc_link *lnk)
1765 if (smc_link_downing(&lnk->state)) {
1766 trace_smcr_link_down(lnk, __builtin_return_address(0));
1767 smcr_link_down(lnk);
1772 void smcr_link_down_cond_sched(struct smc_link *lnk)
1774 if (smc_link_downing(&lnk->state)) {
1775 trace_smcr_link_down(lnk, __builtin_return_address(0));
1776 schedule_work(&lnk->link_down_wrk);
1792 struct smc_link *lnk = &lgr->lnk[i];
1794 if (smc_link_usable(lnk) &&
1795 lnk->smcibdev == smcibdev && lnk->ibport == ibport)
1796 smcr_link_down_cond_sched(lnk);
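
smcr_link_down_cond() and smcr_link_down_cond_sched() (lines 1763-1776) differ only in whether the teardown runs inline or is deferred to the link_down_wrk work item; both first go through smc_link_downing(), whose body is not shown here but which gates the transition so the actual smcr_link_down() handling runs only once per failure. A sketch of such a one-shot state transition using a C11 compare-and-swap, with invented down_* names:

#include <stdatomic.h>
#include <stdbool.h>

enum down_state { DOWN_UNUSED, DOWN_ACTIVE, DOWN_INACTIVE };

struct down_link { _Atomic enum down_state state; };

/* only the caller that wins the ACTIVE -> INACTIVE transition returns true,
 * so concurrent "link down" triggers lead to a single teardown */
static bool down_link_downing(struct down_link *lnk)
{
        enum down_state expected = DOWN_ACTIVE;

        return atomic_compare_exchange_strong(&lnk->state, &expected,
                                              DOWN_INACTIVE);
}
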
1870 struct smc_link *lnk;
1878 lnk = &lgr->lnk[i];
1880 if (!smc_link_active(lnk))
1883 if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net))
1885 if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) &&
1886 !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) &&
1888 !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN)))
1987 smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */
2075 struct smc_link *lnk)
2081 if (buf_desc->is_map_ib[lnk->link_idx])
2093 rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
2099 for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
2108 sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
2113 rc = smc_ib_buf_map_sg(lnk, buf_desc,
2122 smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;
2130 rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags,
2131 buf_desc, lnk->link_idx);
2134 smc_ib_sync_sg_for_device(lnk, buf_desc,
2137 buf_desc->is_map_ib[lnk->link_idx] = true;
2141 smc_ib_buf_unmap_sg(lnk, buf_desc,
2144 sg_free_table(&buf_desc->sgt[lnk->link_idx]);
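
smcr_buf_map_link() (lines 2075-2144) maps a buffer for one specific link: it returns early when is_map_ib[link_idx] is already set, builds a scatterlist, DMA-maps it, registers a memory region where needed, and on failure unwinds only that link's mapping and sg table. The sketch below keeps just the "map once per link, unwind on failure" shape, with placeholder map_step_/map_undo_ functions standing in for the scatterlist and RDMA calls.

#include <stdbool.h>

#define MAP_LINKS 3                              /* assumption, not from SMC */

struct map_buf { bool is_mapped[MAP_LINKS]; };

static int  map_step_build_sg(void) { return 0; }   /* cf. sg_alloc_table()    */
static int  map_step_dma_map(void)  { return 0; }   /* cf. smc_ib_buf_map_sg() */
static void map_undo_build_sg(void) { }              /* cf. sg_free_table()     */

static int map_buf_map_link(struct map_buf *buf, int link_idx)
{
        int rc;

        if (buf->is_mapped[link_idx])
                return 0;                        /* already mapped for this link */
        rc = map_step_build_sg();
        if (rc)
                return rc;
        rc = map_step_dma_map();
        if (rc)
                goto free_sg;
        buf->is_mapped[link_idx] = true;
        return 0;

free_sg:
        map_undo_build_sg();
        return rc;
}
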
2169 static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
2179 rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
2189 int smcr_buf_map_lgr(struct smc_link *lnk)
2191 struct smc_link_group *lgr = lnk->lgr;
2195 rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
2199 rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
2210 int smcr_buf_reg_lgr(struct smc_link *lnk)
2212 struct smc_link_group *lgr = lnk->lgr;
2222 rc = smcr_link_reg_buf(lnk, buf_desc);
2240 rc = smcr_link_reg_buf(lnk, buf_desc);
2307 struct smc_link *lnk = &lgr->lnk[i];
2309 if (!smc_link_usable(lnk))
2311 if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
2453 !smc_link_active(conn->lnk))
2455 smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
2467 if (!smc_link_active(&conn->lgr->lnk[i]))
2469 smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
2588 if (lgr->lnk[link_idx].link_id == link_id) {
2600 int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
2602 struct smc_link_group *lgr = smc_get_lgr(lnk);
2608 if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2609 lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
2618 lgr->rtokens[i][lnk->link_idx].rkey = rkey;
2619 lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
2624 int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
2626 struct smc_link_group *lgr = smc_get_lgr(lnk);
2631 if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2646 struct smc_link *lnk,
2649 conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
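
smc_rtoken_add() and smc_rtoken_delete() (lines 2600-2631) manage the peer's RMB keys in a table with one row per rtoken and one column per link: add first looks for an existing entry with the same rkey and dma_addr on this link and otherwise fills a free row, delete finds the row by rkey and clears it across all links. A simplified user-space sketch of that table follows; the sizes are invented, endianness conversion of the wire values is omitted, and a zero rkey stands in for the bitmap the real code uses to track occupied rows.

#include <stdint.h>
#include <string.h>

#define RT_ROWS  8                               /* assumption, not from SMC */
#define RT_LINKS 3                               /* assumption, not from SMC */

struct rt_entry { uint32_t rkey; uint64_t dma_addr; };

static struct rt_entry rtokens[RT_ROWS][RT_LINKS];

/* return the row holding (rkey, dma_addr) for link_idx, adding it to a free
 * row if necessary; -1 if the table is full */
static int rt_add(int link_idx, uint32_t rkey, uint64_t dma_addr)
{
        int i, free_row = -1;

        for (i = 0; i < RT_ROWS; i++) {
                if (rtokens[i][link_idx].rkey == rkey &&
                    rtokens[i][link_idx].dma_addr == dma_addr)
                        return i;                /* already known */
                if (!rtokens[i][link_idx].rkey && free_row < 0)
                        free_row = i;
        }
        if (free_row < 0)
                return -1;
        rtokens[free_row][link_idx].rkey = rkey;
        rtokens[free_row][link_idx].dma_addr = dma_addr;
        return free_row;
}

/* forget an rtoken: find the row by rkey on the given link and clear it
 * for every link, as the real delete does */
static int rt_delete(int link_idx, uint32_t rkey)
{
        int i;

        for (i = 0; i < RT_ROWS; i++) {
                if (rtokens[i][link_idx].rkey != rkey)
                        continue;
                memset(rtokens[i], 0, sizeof(rtokens[i]));
                return 0;
        }
        return -1;
}
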