Lines Matching refs:conn_id

1992 struct rdma_id_private *listen_id, *conn_id = NULL;
2016 conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
2021 conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
2025 if (!conn_id) {
2030 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2031 ret = cma_acquire_dev(conn_id, listen_id);
2035 conn_id->cm_id.ib = cm_id;
2036 cm_id->context = conn_id;
2040 * Protect against the user destroying conn_id from another thread
2043 atomic_inc(&conn_id->refcount);
2044 ret = conn_id->id.event_handler(&conn_id->id, &event);
2052 if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
2053 (conn_id->id.qp_type != IB_QPT_UD))
2056 mutex_unlock(&conn_id->handler_mutex);
2058 cma_deref_id(conn_id);
2064 cma_deref_id(conn_id);
2066 conn_id->cm_id.ib = NULL;
2068 cma_exch(conn_id, RDMA_CM_DESTROYING);
2069 mutex_unlock(&conn_id->handler_mutex);
2072 if (conn_id)
2073 rdma_destroy_id(&conn_id->id);
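The cluster above appears to come from the IB connection-request path in drivers/infiniband/core/cma.c (presumably cma_req_handler(), judging by the cm_id.ib accesses). The matched lines trace conn_id's lifecycle: it is created with cma_new_udp_id() or cma_new_conn_id(), its handler_mutex is taken with SINGLE_DEPTH_NESTING (the listener's mutex is already held), a reference is taken before the user's event handler runs so a concurrent rdma_destroy_id() cannot free it mid-callback, and the reference is dropped afterwards; on failure the id is exchanged into RDMA_CM_DESTROYING and destroyed. Below is a minimal user-space sketch of that hold-a-reference-around-the-callback pattern; the names (struct my_id, my_create_id, my_deref_id, my_req_handler) are illustrative, not the kernel's.

    /*
     * Sketch only: the shape mirrors cma.c, the names do not. Hold a
     * reference and the handler mutex across the user callback so a
     * concurrent destroy cannot free the object mid-call.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct my_id {
    	atomic_int refcount;
    	pthread_mutex_t handler_mutex;
    	int (*event_handler)(struct my_id *id, int event);
    };

    static struct my_id *my_create_id(int (*cb)(struct my_id *, int))
    {
    	struct my_id *id = calloc(1, sizeof(*id));

    	if (!id)
    		return NULL;
    	atomic_init(&id->refcount, 1);		/* caller's reference */
    	pthread_mutex_init(&id->handler_mutex, NULL);
    	id->event_handler = cb;
    	return id;
    }

    static void my_deref_id(struct my_id *id)
    {
    	/* Free only when the last reference is dropped. */
    	if (atomic_fetch_sub(&id->refcount, 1) == 1) {
    		pthread_mutex_destroy(&id->handler_mutex);
    		free(id);
    	}
    }

    static int my_req_handler(struct my_id *conn_id, int event)
    {
    	int ret;

    	pthread_mutex_lock(&conn_id->handler_mutex);
    	/*
    	 * As at line 2043 above: take a reference so another thread
    	 * destroying conn_id cannot free it while the user's event
    	 * handler is still running.
    	 */
    	atomic_fetch_add(&conn_id->refcount, 1);
    	ret = conn_id->event_handler(conn_id, event);
    	pthread_mutex_unlock(&conn_id->handler_mutex);
    	my_deref_id(conn_id);
    	return ret;
    }
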
2162 struct rdma_id_private *listen_id, *conn_id;
2183 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
2184 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2185 conn_id->state = RDMA_CM_CONNECT;
2187 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
2189 mutex_unlock(&conn_id->handler_mutex);
2194 ret = cma_acquire_dev(conn_id, listen_id);
2196 mutex_unlock(&conn_id->handler_mutex);
2201 conn_id->cm_id.iw = cm_id;
2202 cm_id->context = conn_id;
2205 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
2206 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
2216 * Protect against the user destroying conn_id from another thread
2219 atomic_inc(&conn_id->refcount);
2220 ret = conn_id->id.event_handler(&conn_id->id, &event);
2223 conn_id->cm_id.iw = NULL;
2224 cma_exch(conn_id, RDMA_CM_DESTROYING);
2225 mutex_unlock(&conn_id->handler_mutex);
2226 cma_deref_id(conn_id);
2227 rdma_destroy_id(&conn_id->id);
2231 mutex_unlock(&conn_id->handler_mutex);
2232 cma_deref_id(conn_id);
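
The second cluster is the analogous iWARP path (presumably iw_conn_req_handler()): the new id arrives from the iw_cm layer and is recovered via container_of(), the resolved local and remote socket addresses are copied in with memcpy() and rdma_addr_size(), and the same reference-and-mutex dance surrounds the user callback. The extra wrinkle is the error path at lines 2223-2227: when the user's handler returns non-zero, the kernel detaches cm_id.iw, exchanges the state to RDMA_CM_DESTROYING, unlocks, drops its reference, and destroys the id itself rather than leaving that to the user. A sketch of that destroy-on-handler-error path, reusing the illustrative types from the previous sketch:

    static int my_iw_req_handler(struct my_id *conn_id, int event)
    {
    	int ret;

    	pthread_mutex_lock(&conn_id->handler_mutex);
    	atomic_fetch_add(&conn_id->refcount, 1);	/* as at line 2219 */
    	ret = conn_id->event_handler(conn_id, event);
    	if (ret) {
    		/*
    		 * User rejected the connection (lines 2223-2227): detach
    		 * the low-level id, unlock, and drop both references so
    		 * the id is torn down here.
    		 */
    		conn_id->event_handler = NULL;	/* like conn_id->cm_id.iw = NULL */
    		pthread_mutex_unlock(&conn_id->handler_mutex);
    		my_deref_id(conn_id);	/* reference taken above */
    		my_deref_id(conn_id);	/* creation reference: frees the id */
    		return ret;
    	}
    	pthread_mutex_unlock(&conn_id->handler_mutex);
    	my_deref_id(conn_id);		/* success path, lines 2231-2232 */
    	return 0;
    }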