Lines Matching refs:conn

89 	struct rds_connection *conn, *ret = NULL;
91 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
92 if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
93 ipv6_addr_equal(&conn->c_laddr, laddr) &&
94 conn->c_trans == trans &&
95 conn->c_tos == tos &&
96 net == rds_conn_net(conn) &&
97 conn->c_dev_if == dev_if) {
98 ret = conn;
102 rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
115 struct rds_connection *conn = cp->cp_conn;
118 &conn->c_laddr, &conn->c_faddr);
130 static void __rds_conn_path_init(struct rds_connection *conn,
139 cp->cp_conn = conn;
153 * There is only ever one 'conn' for a given pair of addresses in the
168 struct rds_connection *conn, *parent = NULL;
176 conn = rds_conn_lookup(net, head, laddr, faddr, trans, tos, dev_if);
177 if (conn &&
178 conn->c_loopback &&
179 conn->c_trans != &rds_loop_transport &&
186 parent = conn;
187 conn = parent->c_passive;
190 if (conn)
193 conn = kmem_cache_zalloc(rds_conn_slab, gfp);
194 if (!conn) {
195 conn = ERR_PTR(-ENOMEM);
198 conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
199 if (!conn->c_path) {
200 kmem_cache_free(rds_conn_slab, conn);
201 conn = ERR_PTR(-ENOMEM);
205 INIT_HLIST_NODE(&conn->c_hash_node);
206 conn->c_laddr = *laddr;
207 conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
208 conn->c_faddr = *faddr;
209 conn->c_dev_if = dev_if;
210 conn->c_tos = tos;
219 conn->c_bound_if = dev_if;
222 conn->c_bound_if = 0;
224 rds_conn_net_set(conn, net);
226 ret = rds_cong_get_maps(conn);
228 kfree(conn->c_path);
229 kmem_cache_free(rds_conn_slab, conn);
230 conn = ERR_PTR(ret);
239 loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
242 conn->c_loopback = 1;
256 kfree(conn->c_path);
257 kmem_cache_free(rds_conn_slab, conn);
258 conn = ERR_PTR(-EOPNOTSUPP);
264 conn->c_trans = trans;
266 init_waitqueue_head(&conn->c_hs_waitq);
268 __rds_conn_path_init(conn, &conn->c_path[i],
270 conn->c_path[i].cp_index = i;
273 if (rds_destroy_pending(conn))
276 ret = trans->conn_alloc(conn, GFP_ATOMIC);
279 kfree(conn->c_path);
280 kmem_cache_free(rds_conn_slab, conn);
281 conn = ERR_PTR(ret);
285 rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
286 conn, laddr, faddr,
291 * Since we ran without holding the conn lock, someone could
292 * have created the same conn (either normal or passive) in the
294 * init and return our conn. If we lost, we rollback and return the
299 /* Creating passive conn */
301 trans->conn_free(conn->c_path[0].cp_transport_data);
302 kfree(conn->c_path);
303 kmem_cache_free(rds_conn_slab, conn);
304 conn = parent->c_passive;
306 parent->c_passive = conn;
307 rds_cong_add_conn(conn);
311 /* Creating normal conn */
321 cp = &conn->c_path[i];
329 kfree(conn->c_path);
330 kmem_cache_free(rds_conn_slab, conn);
331 conn = found;
333 conn->c_my_gen_num = rds_gen_num;
334 conn->c_peer_gen_num = 0;
335 hlist_add_head_rcu(&conn->c_hash_node, head);
336 rds_cong_add_conn(conn);
344 return conn;
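
The matches from line 153 through line 344 trace __rds_conn_create(): look the connection up, optimistically allocate a new one with kmem_cache_zalloc() plus a kcalloc()'d path array while no lock is held, and only then re-check under the connection lock; the loser of the race frees everything it allocated and returns the winner's conn (or, in the passive loopback case, the one parked in parent->c_passive). The userspace sketch below shows just that allocate/re-check/roll-back shape. Every identifier in it is hypothetical, and it uses a plain mutex where the kernel pairs its connection lock with RCU so the read side stays lock-free.

/* Illustrative only: the "allocate outside the lock, re-check under the
 * lock, roll back if we lost" pattern visible in __rds_conn_create(). */
#include <pthread.h>
#include <stdlib.h>

struct obj {
        int key;
        struct obj *next;
};

static struct obj *bucket;              /* one hash bucket, for brevity */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold bucket_lock (the kernel walks the bucket under RCU
 * instead, which is why its fast path needs no lock). */
static struct obj *lookup(int key)
{
        struct obj *o;

        for (o = bucket; o; o = o->next)
                if (o->key == key)
                        return o;
        return NULL;
}

struct obj *get_or_create(int key)
{
        struct obj *o, *found;

        pthread_mutex_lock(&bucket_lock);
        o = lookup(key);                /* fast path: already there? */
        pthread_mutex_unlock(&bucket_lock);
        if (o)
                return o;

        o = calloc(1, sizeof(*o));      /* optimistic allocation, no lock held */
        if (!o)
                return NULL;
        o->key = key;

        pthread_mutex_lock(&bucket_lock);
        found = lookup(key);            /* did someone beat us to it? */
        if (found) {
                free(o);                /* lost the race: roll back */
                o = found;
        } else {
                o->next = bucket;       /* won the race: publish the object */
                bucket = o;
        }
        pthread_mutex_unlock(&bucket_lock);
        return o;
}

Allocating before taking the lock keeps potentially sleeping allocations out of the critical section, which is also why the kernel function re-checks for a racing creator (the "found" path at lines 329-331) instead of holding the lock across the allocation.
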
369 struct rds_connection *conn = cp->cp_conn;
398 conn->c_trans->conn_path_shutdown(cp);
426 * to the conn hash, so we never trigger a reconnect on this
427 * conn - the reconnect is always triggered by the active peer. */
430 if (!hlist_unhashed(&conn->c_hash_node)) {
448 /* make sure lingering queued work won't try to ref the conn */
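
Among the shutdown-side matches (lines 369-448), the check at line 430 decides whether to re-arm the connection: a reconnect is queued only if the conn is still in the hash. The passive side of a loopback connection lives in parent->c_passive (lines 187, 304-306) and is never hashed, so hlist_unhashed() is precisely the "passive, never reconnect" test described by the comment at lines 426-427. A rough sketch of that tail follows; the rds_queue_reconnect() call and the RCU read-side locking are assumptions from memory, not lines shown in this listing.

        /* The passive side of an IB loopback connection is never added
         * to the conn hash, so we never trigger a reconnect on this
         * conn - the reconnect is always triggered by the active peer. */
        rcu_read_lock();
        if (!hlist_unhashed(&conn->c_hash_node)) {
                rcu_read_unlock();
                rds_queue_reconnect(cp);        /* assumed helper name */
        } else {
                rcu_read_unlock();
        }
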
478 * the conn has been shutdown that no one else is referencing the connection.
481 void rds_conn_destroy(struct rds_connection *conn)
486 int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
488 rdsdebug("freeing conn %p for %pI4 -> "
489 "%pI4\n", conn, &conn->c_laddr,
490 &conn->c_faddr);
492 /* Ensure conn will not be scheduled for reconnect */
494 hlist_del_init_rcu(&conn->c_hash_node);
500 cp = &conn->c_path[i];
510 rds_cong_remove_conn(conn);
512 kfree(conn->c_path);
513 kmem_cache_free(rds_conn_slab, conn);
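
The matches at lines 481-513 fix the teardown order of rds_conn_destroy(): unhash the connection first so nothing can schedule a reconnect, tear down each of the npaths paths, drop the congestion-map linkage, then free the path array and finally the slab object itself. The fragment below only sketches that ordering; the locking, the synchronize_rcu() and the reduction of the per-path teardown to a comment are assumptions about context not shown in the listing.

void rds_conn_destroy(struct rds_connection *conn)
{
        int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
        int i;

        /* Ensure conn will not be scheduled for reconnect */
        spin_lock_irq(&rds_conn_lock);
        hlist_del_init_rcu(&conn->c_hash_node);
        spin_unlock_irq(&rds_conn_lock);
        synchronize_rcu();                      /* let RCU lookups drain */

        for (i = 0; i < npaths; i++) {
                /* per-path shutdown and free of conn->c_path[i] (line 500) */
        }

        rds_cong_remove_conn(conn);             /* line 510 */

        kfree(conn->c_path);                    /* line 512 */
        kmem_cache_free(rds_conn_slab, conn);   /* line 513 */
}
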
541 struct rds_connection *conn;
557 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
561 if (!isv6 && conn->c_isv6)
564 npaths = (conn->c_trans->t_mp_capable ?
568 cp = &conn->c_path[j];
582 &conn->c_laddr,
583 &conn->c_faddr,
660 struct rds_connection *conn;
670 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
673 if (!visitor(conn, buffer))
698 struct rds_connection *conn;
708 hlist_for_each_entry_rcu(conn, head, c_hash_node) {
719 cp = conn->c_path;
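
Lines 486, 500, 564-568 and 719 all repeat the same per-path walk: a connection owns a single rds_conn_path, or RDS_MPATH_WORKERS of them when its transport is multipath-capable, and each iterator recomputes npaths the same way before visiting c_path[0..npaths-1]. The standalone sketch below shows only that shape; every identifier in it is hypothetical.

/* Illustrative only: the "one path or RDS_MPATH_WORKERS paths" walk. */
#include <stdio.h>

#define MPATH_WORKERS 8                 /* stand-in for RDS_MPATH_WORKERS */

struct path {
        int index;
        int up;
};

struct connection {
        int mp_capable;
        struct path paths[MPATH_WORKERS];
};

typedef void (*path_visitor_t)(const struct path *p);

static void show_path(const struct path *p)
{
        printf("path %d: %s\n", p->index, p->up ? "up" : "down");
}

/* Mirrors "npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1)". */
static void for_each_path(const struct connection *c, path_visitor_t visit)
{
        int npaths = c->mp_capable ? MPATH_WORKERS : 1;
        int i;

        for (i = 0; i < npaths; i++)
                visit(&c->paths[i]);
}

int main(void)
{
        struct connection c = { .mp_capable = 1 };
        int i;

        for (i = 0; i < MPATH_WORKERS; i++)
                c.paths[i].index = i;
        c.paths[0].up = 1;

        for_each_path(&c, show_path);
        return 0;
}
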
742 struct rds_connection *conn = cp->cp_conn;
744 if (conn->c_isv6)
749 cinfo->laddr = conn->c_laddr.s6_addr32[3];
750 cinfo->faddr = conn->c_faddr.s6_addr32[3];
751 cinfo->tos = conn->c_tos;
752 strncpy(cinfo->transport, conn->c_trans->t_name,
772 struct rds_connection *conn = cp->cp_conn;
776 cinfo6->laddr = conn->c_laddr;
777 cinfo6->faddr = conn->c_faddr;
778 strncpy(cinfo6->transport, conn->c_trans->t_name,
893 void rds_conn_drop(struct rds_connection *conn)
895 WARN_ON(conn->c_trans->t_mp_capable);
896 rds_conn_path_drop(&conn->c_path[0], false);
920 void rds_check_all_paths(struct rds_connection *conn)
925 rds_conn_path_connect_if_down(&conn->c_path[i]);
926 } while (++i < conn->c_npaths);
929 void rds_conn_connect_if_down(struct rds_connection *conn)
931 WARN_ON(conn->c_trans->t_mp_capable);
932 rds_conn_path_connect_if_down(&conn->c_path[0]);
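
The final matches (lines 893-932) show the thin single-path wrappers: rds_conn_drop() and rds_conn_connect_if_down() warn if the transport is multipath-capable and then act on c_path[0] only, while rds_check_all_paths() loops over every populated path. Reconstructed from those lines (the loop counter, braces and any EXPORT_SYMBOL annotations are filled-in assumptions):

void rds_conn_drop(struct rds_connection *conn)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_conn_path_drop(&conn->c_path[0], false);
}

/* Kick every populated path that is currently down. */
void rds_check_all_paths(struct rds_connection *conn)
{
        int i = 0;

        do {
                rds_conn_path_connect_if_down(&conn->c_path[i]);
        } while (++i < conn->c_npaths);
}

void rds_conn_connect_if_down(struct rds_connection *conn)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_conn_path_connect_if_down(&conn->c_path[0]);
}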