Lines matching refs: con (drivers/infiniband/ulp/rtrs/rtrs-clt.c, the RTRS RDMA transport client; leading numbers are source line numbers)

188 return to_clt_con(clt_path->s.con[id]);
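Line 188 is the tail of rtrs_permit_to_clt_con(): to_clt_con() is a container_of() downcast from the generic struct rtrs_con embedded inside the client connection. A minimal sketch of that embedding, with the field sets abridged to what this listing itself shows (anything beyond those fields is an assumption):

#include <linux/kernel.h>	/* container_of() */

struct rtrs_con {		/* generic, transport-level connection */
	struct ib_qp		*qp;
	struct rtrs_path	*path;
	unsigned int		cid;
};

struct rtrs_clt_con {		/* client connection, embeds the base */
	struct rtrs_con		c;
	unsigned int		cpu;
	int			cm_err;
};

static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
{
	return container_of(c, struct rtrs_clt_con, c);
}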
307 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
309 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
331 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
334 rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
336 rtrs_rdma_error_recovery(con);
351 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
354 rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
356 rtrs_rdma_error_recovery(con);
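Lines 331-356 show the two memory-registration completion handlers (for IB_WR_REG_MR and IB_WR_LOCAL_INV) funneling any failed work completion into rtrs_rdma_error_recovery(). The pattern, as a hedged sketch built only from the lines above (the name example_reg_mr_done is illustrative):

static void example_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* recover the connection from the QP context, as on line 331 */
	struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);

	if (wc->status != IB_WC_SUCCESS) {
		rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
			 ib_wc_status_msg(wc->status));
		/* escalate to path-level reconnect handling */
		rtrs_rdma_error_recovery(con);
	}
}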
368 struct rtrs_clt_con *con = req->con;
377 return ib_post_send(con->c.qp, &wr, NULL);
383 struct rtrs_clt_con *con = req->con;
389 if (WARN_ON(!req->con))
391 clt_path = to_clt_path(con->c.path);
421 rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
446 req->con = NULL;
449 rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
458 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
463 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
468 rtrs_wrn(con->c.path,
482 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
489 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
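Line 482 (and again line 1056) implements selective signaling: only every signal_interval-th send WR requests a completion, which keeps CQ traffic low while still letting the send queue be reaped periodically. The same arithmetic, extracted as a stand-alone helper for clarity (the helper name is illustrative):

static enum ib_send_flags example_signal_flags(atomic_t *wr_cnt,
					       int signal_interval)
{
	/* 0 for most WRs, IB_SEND_SIGNALED for every Nth one */
	return atomic_inc_return(wr_cnt) % signal_interval ?
			0 : IB_SEND_SIGNALED;
}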
508 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
512 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
517 err = rtrs_iu_post_recv(&con->c, iu);
519 rtrs_err(con->c.path, "post iu failed %d\n", err);
520 rtrs_rdma_error_recovery(con);
524 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
526 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
539 rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
571 return rtrs_clt_recv_done(con, wc);
573 rtrs_rdma_error_recovery(con);
586 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
600 return ib_post_recv(con->qp, wr, NULL);
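Lines 586-600 post two zero-length receives sharing one CQE. A plausible reconstruction of the body between those two lines; the backwards chaining is an assumption, but it is consistent with line 600 posting `wr` (the last element written by the loop) in a single verbs call:

static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
{
	struct ib_recv_wr wr_arr[2], *wr;
	int i;

	memset(wr_arr, 0, sizeof(wr_arr));
	for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
		wr = &wr_arr[i];
		wr->wr_cqe = cqe;	/* no SGE list: a zero-length recv */
		if (i)
			/* chain backwards; completion order is irrelevant */
			wr->next = &wr_arr[i - 1];
	}

	/* one ib_post_recv() posts the whole chain (source line 600) */
	return ib_post_recv(con->qp, wr, NULL);
}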
605 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
606 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
615 rtrs_rdma_error_recovery(con);
619 rtrs_clt_update_wc_stats(con);
640 WARN_ON(con->c.cid);
643 return rtrs_clt_recv_done(con, wc);
645 WARN_ON(con->c.cid);
650 return rtrs_clt_recv_done(con, wc);
652 rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
660 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
662 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
664 rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
666 rtrs_rdma_error_recovery(con);
678 return rtrs_clt_recv_done(con, wc);
680 return rtrs_clt_rkey_rsp_done(con, wc);
696 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
699 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
703 struct rtrs_iu *iu = &con->rsp_ius[i];
705 err = rtrs_iu_post_recv(&con->c, iu);
707 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
733 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
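Lines 696-707 pre-post the receive queue for one connection, and line 733 repeats that per cid. A hedged reconstruction of the loop implied by the two branches above; the RTRS_MSG_NEW_RKEY_F test and the early return on error are assumptions drawn from lines 705-707 and 1710:

static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	int err, i;

	for (i = 0; i < q_size; i++) {
		if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
			/* rkey replies expected: post a real IU buffer */
			struct rtrs_iu *iu = &con->rsp_ius[i];

			err = rtrs_iu_post_recv(&con->c, iu);
		} else {
			/* IMM-only traffic: a zero-length recv suffices */
			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
		}
		if (err)
			return err;
	}

	return 0;
}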
968 req->con = rtrs_permit_to_clt_con(clt_path, permit);
1017 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
1024 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1056 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
1063 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
1083 struct rtrs_clt_con *con = req->con;
1084 struct rtrs_path *s = con->c.path;
1157 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
1177 struct rtrs_clt_con *con = req->con;
1178 struct rtrs_path *s = con->c.path;
1264 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
1496 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1498 rtrs_rdma_error_recovery(con);
1546 clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
1548 if (!clt_path->s.con)
1601 kfree(clt_path->s.con);
1612 kfree(clt_path->s.con);
1619 struct rtrs_clt_con *con;
1621 con = kzalloc(sizeof(*con), GFP_KERNEL);
1622 if (!con)
1626 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1627 con->c.cid = cid;
1628 con->c.path = &clt_path->s;
1630 atomic_set(&con->c.wr_cnt, 1);
1631 mutex_init(&con->con_mutex);
1633 clt_path->s.con[cid] = &con->c;
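In create_con() (lines 1619-1633), cid 0 is the user/service connection and IO connections start at cid 1, so line 1626 spreads the IO connections round-robin over the CPU id space. Extracted for clarity (helper name illustrative):

static unsigned int example_con_cpu(unsigned int cid)
{
	/* cid 0 -> CPU 0; IO connections (cid >= 1) -> (cid - 1) mod CPUs */
	return (cid ? cid - 1 : 0) % nr_cpu_ids;
}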
1638 static void destroy_con(struct rtrs_clt_con *con)
1640 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1642 clt_path->s.con[con->c.cid] = NULL;
1643 mutex_destroy(&con->con_mutex);
1644 kfree(con);
1647 static int create_con_cq_qp(struct rtrs_clt_con *con)
1649 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1654 lockdep_assert_held(&con->con_mutex);
1655 if (con->c.cid == 0) {
1666 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1707 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1710 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1711 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
1716 if (!con->rsp_ius)
1718 con->queue_num = cq_num;
1720 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
1721 if (con->c.cid >= clt_path->s.irq_con_num)
1722 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1726 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1736 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1738 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1744 lockdep_assert_held(&con->con_mutex);
1745 rtrs_cq_qp_destroy(&con->c);
1746 if (con->rsp_ius) {
1747 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
1748 con->queue_num);
1749 con->rsp_ius = NULL;
1750 con->queue_num = 0;
1758 static void stop_cm(struct rtrs_clt_con *con)
1760 rdma_disconnect(con->c.cm_id);
1761 if (con->c.qp)
1762 ib_drain_qp(con->c.qp);
1765 static void destroy_cm(struct rtrs_clt_con *con)
1767 rdma_destroy_id(con->c.cm_id);
1768 con->c.cm_id = NULL;
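stop_cm() (lines 1758-1762) disconnects and then drains the QP so no completion can race the destruction that follows. The full per-connection teardown ordering, assembled into one hedged sketch from the loop at source lines 2203-2210 (the wrapper name is illustrative; the called functions are the ones listed above):

static void example_teardown(struct rtrs_clt_con *con)
{
	stop_cm(con);			/* rdma_disconnect() + ib_drain_qp() */
	mutex_lock(&con->con_mutex);
	destroy_con_cq_qp(con);		/* CQ/QP and rsp_ius go away */
	mutex_unlock(&con->con_mutex);
	destroy_cm(con);		/* rdma_destroy_id() */
	destroy_con(con);		/* unhook from s.con[] and kfree() */
}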
1771 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1773 struct rtrs_path *s = con->c.path;
1776 mutex_lock(&con->con_mutex);
1777 err = create_con_cq_qp(con);
1778 mutex_unlock(&con->con_mutex);
1783 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1790 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1792 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1809 .cid = cpu_to_le16(con->c.cid),
1817 err = rdma_connect_locked(con->c.cm_id, &param);
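Lines 1771-1817 are the client half of the RDMA-CM handshake: on ADDR_RESOLVED the CQ/QP are created under con_mutex and the route is resolved; on ROUTE_RESOLVED a connect request carrying the connection id is sent with rdma_connect_locked(), which is required because the caller sits inside the CM event handler and already holds the id lock. A condensed sketch of the second step; the retry counts and the rtrs_msg_conn_req layout beyond .cid are assumptions:

static int example_route_resolved(struct rtrs_clt_con *con)
{
	struct rtrs_msg_conn_req msg = {
		.cid = cpu_to_le16(con->c.cid),	/* source line 1809 */
	};
	struct rdma_conn_param param = {
		.retry_count	  = 7,		/* assumed value */
		.rnr_retry_count  = 7,		/* assumed value */
		.private_data	  = &msg,
		.private_data_len = sizeof(msg),
	};

	/* locked variant: we are called from the CM handler context */
	return rdma_connect_locked(con->c.cm_id, &param);
}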
1824 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1827 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1856 if (con->c.cid == 0) {
1903 clt_path->hca_port = con->c.cm_id->port_num;
1906 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
1914 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1916 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1919 con->cm_err = 1;
1922 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1925 struct rtrs_path *s = con->c.path;
1932 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1933 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1963 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1965 if (con->cm_err == 1) {
1968 clt_path = to_clt_path(con->c.path);
1973 con->cm_err = cm_err;
1979 struct rtrs_clt_con *con = cm_id->context;
1980 struct rtrs_path *s = con->c.path;
1986 cm_err = rtrs_rdma_addr_resolved(con);
1989 cm_err = rtrs_rdma_route_resolved(con);
1992 cm_err = rtrs_rdma_conn_established(con, ev);
1998 flag_success_on_conn(con);
2004 cm_err = rtrs_rdma_conn_rejected(con, ev);
2044 flag_error_on_conn(con, cm_err);
2045 rtrs_rdma_error_recovery(con);
2052 static int create_cm(struct rtrs_clt_con *con)
2054 struct rtrs_path *s = con->c.path;
2059 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
2066 con->c.cm_id = cm_id;
2067 con->cm_err = 0;
2088 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
2096 if (con->cm_err < 0)
2097 return con->cm_err;
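create_cm() (lines 2052-2097) is synchronous from the caller's point of view even though CM events arrive asynchronously: it parks on the path's wait queue until the handler sets cm_err, either to 1 via flag_success_on_conn() (line 1919) or to a negative error via flag_error_on_conn(). A hedged sketch of that shape; clt_path->state_wq, the rdma_resolve_addr() step, and the RDMA_PS_TCP choice are assumptions:

static int example_create_cm(struct rtrs_clt_con *con,
			     struct rtrs_clt_path *clt_path)
{
	struct rdma_cm_id *cm_id;
	int err;

	cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
			       RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	con->c.cm_id = cm_id;
	con->cm_err = 0;

	/* kicks off the async ADDR/ROUTE/CONN event chain */
	err = rdma_resolve_addr(cm_id,
				(struct sockaddr *)&clt_path->s.src_addr,
				(struct sockaddr *)&clt_path->s.dst_addr,
				RTRS_CONNECT_TIMEOUT_MS);
	if (err)
		return err;

	/* wait for the handler's verdict, as on source line 2088 */
	wait_event_interruptible_timeout(clt_path->state_wq,
			con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
			msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));

	return con->cm_err < 0 ? con->cm_err : 0;
}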
2153 struct rtrs_clt_con *con;
2181 if (!clt_path->s.con[cid])
2183 con = to_clt_con(clt_path->s.con[cid]);
2184 stop_cm(con);
2203 if (!clt_path->s.con[cid])
2205 con = to_clt_con(clt_path->s.con[cid]);
2206 mutex_lock(&con->con_mutex);
2207 destroy_con_cq_qp(con);
2208 mutex_unlock(&con->con_mutex);
2209 destroy_cm(con);
2210 destroy_con(con);
2345 err = create_cm(to_clt_con(clt_path->s.con[cid]));
2358 struct rtrs_clt_con *con;
2360 if (!clt_path->s.con[i])
2363 con = to_clt_con(clt_path->s.con[i]);
2364 if (con->c.cm_id) {
2365 stop_cm(con);
2366 mutex_lock(&con->con_mutex);
2367 destroy_con_cq_qp(con);
2368 mutex_unlock(&con->con_mutex);
2369 destroy_cm(con);
2371 destroy_con(con);
2385 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2386 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2399 rtrs_clt_update_wc_stats(con);
2469 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2470 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2479 WARN_ON(con->c.cid);
2519 rtrs_clt_update_wc_stats(con);
2526 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
3049 struct rtrs_con *con;
3059 con = clt_path->s.con[index + 1];
3060 cnt = ib_process_cq_direct(con->cq, -1);
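Lines 3049-3060 are the block-layer poll hook: connections with cid >= irq_con_num were created with a poll-context CQ (lines 1721-1722), so completions are reaped by calling into the CQ directly rather than from an interrupt-driven handler; index + 1 skips the user connection at cid 0. A minimal sketch (function name illustrative):

static int example_cq_direct(struct rtrs_clt_path *clt_path,
			     unsigned int index)
{
	struct rtrs_con *con = clt_path->s.con[index + 1];

	/* -1 budget: process everything; valid only for IB_POLL_DIRECT CQs */
	return ib_process_cq_direct(con->cq, -1);
}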