Lines matching refs:id: references to "id" in the kernel RDMA connection manager (drivers/infiniband/core/cma.c). Each entry below is the source line number followed by the matching line.

84 const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
87 if (rdma_ib_or_roce(id->device, id->port_num))
90 if (rdma_protocol_iwarp(id->device, id->port_num))
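The three fragments above are from rdma_reject_msg(), which maps a transport-specific reject status to a printable string, branching on whether the port speaks IB/RoCE or iWARP. A minimal sketch of consumer usage in kernel context (my_cm_handler is a hypothetical event handler, not part of cma.c):

    #include <rdma/rdma_cm.h>

    /* Hypothetical consumer handler: log a readable reject reason,
     * mirroring the pr_debug_ratelimited() call at line 2218 below. */
    static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
    {
            if (ev->event == RDMA_CM_EVENT_REJECTED)
                    pr_info("connection rejected: %s (status %d)\n",
                            rdma_reject_msg(id, ev->status), ev->status);
            return 0;
    }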
101 * @id: Communication identifier that received the REJECT event.
104 static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
106 if (rdma_ib_or_roce(id->device, id->port_num))
109 if (rdma_protocol_iwarp(id->device, id->port_num))
116 const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
121 if (rdma_is_consumer_reject(id, ev->status)) {
134 * @id: Communication identifier
136 struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
140 id_priv = container_of(id, struct rdma_id_private, id);
141 if (id->device->node_type == RDMA_NODE_RNIC)
156 return &id_priv->id;
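The container_of() at line 140 recovers the driver-private state from the public handle, and line 156 hands the embedded public member back out; the whole file leans on this embedding. A reduced sketch of the idiom (demo_id_private is illustrative; the real rdma_id_private carries far more state):

    #include <linux/container_of.h>
    #include <rdma/rdma_cm.h>

    struct demo_id_private {
            struct rdma_cm_id id;   /* public handle, embedded */
            int private_state;      /* hypothetical private data */
    };

    static struct demo_id_private *demo_to_priv(struct rdma_cm_id *id)
    {
            /* Valid because every handle given out is the .id member
             * of a demo_id_private. */
            return container_of(id, struct demo_id_private, id);
    }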
371 struct rdma_id_private *id;
440 return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
445 return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
473 int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
514 node_id_priv->id.route.addr.dev_addr.bound_dev_if,
572 id_priv->id.route.addr.dev_addr.bound_dev_if,
591 id_priv->id.device = cma_dev->device;
592 id_priv->id.route.addr.dev_addr.transport =
604 cma_dev->default_gid_type[id_priv->id.port_num -
614 id_priv->id.device = NULL;
615 if (id_priv->id.route.addr.dev_addr.sgid_attr) {
616 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
617 id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
624 return id_priv->id.route.addr.src_addr.ss_family;
632 switch (id_priv->id.ps) {
638 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
639 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
640 id_priv->id.port_num, &rec.mgid,
688 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
694 if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
743 WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
744 id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
752 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
758 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
767 id_priv->id.ps == RDMA_PS_IPOIB)
770 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
785 id_priv->id.port_num = port;
800 * @id_priv: cm id to bind to cma device
801 * @listen_id_priv: listener cm id to match against
813 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
819 id_priv->id.ps == RDMA_PS_IPOIB)
823 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
835 id_priv->id.port_num = req->port;
851 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
860 id_priv->id.ps == RDMA_PS_IPOIB)
869 port = listen_id_priv->id.port_num;
874 id_priv->id.port_num = port;
883 listen_id_priv->id.port_num == port)
890 id_priv->id.port_num = port;
949 id_priv->id.port_num = p;
958 id_priv->id.port_num = p;
973 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
1000 id_priv->id.context = context;
1001 id_priv->id.event_handler = event_handler;
1002 id_priv->id.ps = ps;
1003 id_priv->id.qp_type = qp_type;
1018 id_priv->id.route.addr.dev_addr.net = get_net(net);
1040 return &ret->id;
1057 return &ret->id;
1067 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1093 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1100 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
1107 id_priv = container_of(id, struct rdma_id_private, id);
1108 if (id->device != pd->device) {
1113 qp_init_attr->port_num = id->port_num;
1120 if (id->qp_type == IB_QPT_UD)
1127 id->qp = qp;
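Lines 1100-1127 belong to rdma_create_qp(), which allocates a QP on the cm id's device (note the id->device != pd->device check at line 1108) and lets the CM drive the QP's state transitions from then on. A sketch of a typical call after route resolution, assuming cq and pd were created earlier on id->device:

    struct ib_qp_init_attr qp_attr = {
            .send_cq     = cq,              /* assumed pre-created CQ */
            .recv_cq     = cq,
            .qp_type     = IB_QPT_RC,
            .sq_sig_type = IB_SIGNAL_REQ_WR,
            .cap = {
                    .max_send_wr  = 16,
                    .max_recv_wr  = 16,
                    .max_send_sge = 1,
                    .max_recv_sge = 1,
            },
    };
    int ret = rdma_create_qp(id, pd, &qp_attr);

    if (ret)
            pr_err("rdma_create_qp() failed: %d\n", ret);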
1140 void rdma_destroy_qp(struct rdma_cm_id *id)
1144 id_priv = container_of(id, struct rdma_id_private, id);
1147 ib_destroy_qp(id_priv->id.qp);
1148 id_priv->id.qp = NULL;
1160 if (!id_priv->id.qp) {
1167 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1171 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1176 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1180 BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
1184 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1197 if (!id_priv->id.qp) {
1203 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1209 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1221 if (!id_priv->id.qp) {
1227 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
1236 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
1240 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
1245 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
1250 qp_attr->port_num = id_priv->id.port_num;
1253 if (id_priv->id.qp_type == IB_QPT_UD) {
1267 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
1273 id_priv = container_of(id, struct rdma_id_private, id);
1274 if (rdma_cap_ib_cm(id->device, id->port_num)) {
1275 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
1283 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
1290 qp_attr->port_num = id_priv->id.port_num;
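rdma_init_qp_attr() at line 1267 is for ULPs that own their QP instead of calling rdma_create_qp(): the caller picks the target state, the CM fills in the transport-dependent attributes and mask, and ib_modify_qp() applies them, the same internal pattern visible at lines 1167-1171. A sketch, assuming qp was created by the caller on id->device:

    struct ib_qp_attr attr = { .qp_state = IB_QPS_INIT };
    int mask, ret;

    ret = rdma_init_qp_attr(id, &attr, &mask);  /* CM supplies pkey, port, access flags */
    if (!ret)
            ret = ib_modify_qp(qp, &attr, mask);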
1739 static bool cma_protocol_roce(const struct rdma_cm_id *id)
1741 struct ib_device *device = id->device;
1742 const u32 port_num = id->port_num ?: rdma_start_port(device);
1758 static bool cma_match_net_dev(const struct rdma_cm_id *id,
1762 const struct rdma_addr *addr = &id->route.addr;
1766 return (!id->port_num || id->port_num == req->port) &&
1803 if (id_priv->id.device == cm_id->device &&
1804 cma_match_net_dev(&id_priv->id, net_dev, req))
1809 if (id_priv_dev->id.device == cm_id->device &&
1810 cma_match_net_dev(&id_priv_dev->id,
1901 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
1928 rdma_destroy_id(&dev_id_priv->id);
1953 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1970 struct net *net = id_priv->id.route.addr.dev_addr.net;
1989 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
1992 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
1994 &id_priv->id.route.addr.dev_addr;
2005 [id_priv->id.port_num -
2039 if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
2042 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
2055 cma_id_put(id_priv->id.context);
2057 kfree(id_priv->id.route.path_rec);
2058 kfree(id_priv->id.route.path_rec_inbound);
2059 kfree(id_priv->id.route.path_rec_outbound);
2061 put_net(id_priv->id.route.addr.dev_addr.net);
2092 void rdma_destroy_id(struct rdma_cm_id *id)
2095 container_of(id, struct rdma_id_private, id);
2154 ret = id_priv->id.event_handler(&id_priv->id, event);
2183 (id_priv->id.qp_type != IB_QPT_UD)) {
2187 if (id_priv->id.qp) {
2218 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
2251 struct rdma_cm_id *id;
2259 listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2268 id = &id_priv->id;
2269 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2270 (struct sockaddr *)&id->route.addr.dst_addr,
2274 rt = &id->route;
2305 rdma_destroy_id(id);
2316 struct rdma_cm_id *id;
2321 listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2328 id = &id_priv->id;
2329 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2330 (struct sockaddr *)&id->route.addr.dst_addr,
2336 rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
2340 &id->route.addr.dev_addr);
2349 rdma_destroy_id(id);
2371 static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
2375 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
2377 (id->qp_type == IB_QPT_UD)) ||
2378 (!id->qp_type));
2396 if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
2410 conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
2415 conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
2445 conn_id->id.qp_type != IB_QPT_UD) {
2460 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
2465 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
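Line 2465 is the complete service-id encoding: the 16-bit port space lands in bits 16-31 and the port number in bits 0-15 of a big-endian 64-bit value. A worked example (port 4791 is an arbitrary choice):

    /* RDMA_PS_TCP is 0x0106 and 4791 is 0x12b7, so sid holds 0x010612b7
     * in big-endian byte order. */
    __be64 sid = cpu_to_be64(((u64)RDMA_PS_TCP << 16) + 4791);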
2581 /* Create a new RDMA id for the new IW CM ID */
2582 conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
2583 listen_id->id.event_handler,
2584 listen_id->id.context, RDMA_PS_TCP,
2593 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
2633 struct ib_cm_id *id;
2637 svc_id = rdma_get_service_id(&id_priv->id, addr);
2638 id = ib_cm_insert_listen(id_priv->id.device,
2640 if (IS_ERR(id))
2641 return PTR_ERR(id);
2642 id_priv->cm_id.ib = id;
2650 struct iw_cm_id *id;
2652 id = iw_create_cm_id(id_priv->id.device,
2655 if (IS_ERR(id))
2656 return PTR_ERR(id);
2659 id->tos = id_priv->tos;
2660 id->tos_set = id_priv->tos_set;
2662 id->afonly = id_priv->afonly;
2663 id_priv->cm_id.iw = id;
2678 static int cma_listen_handler(struct rdma_cm_id *id,
2681 struct rdma_id_private *id_priv = id->context;
2687 id->context = id_priv->id.context;
2688 id->event_handler = id_priv->id.event_handler;
2690 return id_priv->id.event_handler(id, event);
2698 struct net *net = id_priv->id.route.addr.dev_addr.net;
2709 id_priv->id.ps, id_priv->id.qp_type, id_priv);
2727 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
2763 rdma_destroy_id(&to_destroy->id);
2767 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2771 id_priv = container_of(id, struct rdma_id_private, id);
2782 * @id: Communication identifier to associate the ack timeout with.
2794 int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
2798 if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
2801 id_priv = container_of(id, struct rdma_id_private, id);
2814 * @id: Communication identifier to associate the min RNR timer with.
2829 int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
2837 if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
2840 id_priv = container_of(id, struct rdma_id_private, id);
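rdma_set_service_type(), rdma_set_ack_timeout() and rdma_set_min_rnr_timer() (lines 2767-2840) tune per-connection QoS before the QP reaches RTS; note the qp_type guards at lines 2798 and 2837. A sketch of setting all three on an RC connection before rdma_connect() (the values are arbitrary examples):

    rdma_set_service_type(id, 8);           /* TOS / traffic class; returns void */

    ret = rdma_set_ack_timeout(id, 14);     /* 4.096 us * 2^14, about 67 ms; RC/XRC_INI only */
    if (!ret)
            ret = rdma_set_min_rnr_timer(id, 7); /* 5-bit IBTA RNR NAK timer; RC/XRC_TGT only */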
2853 struct rdma_route *route = &work->id->id.route;
2869 struct rdma_route *route = &work->id->id.route;
2889 route = &work->id->id.route;
2926 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2934 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
2943 path_rec.service_id = rdma_get_service_id(&id_priv->id,
2967 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2968 id_priv->id.port_num, &path_rec,
3001 struct rdma_id_private *id_priv = work->id;
3030 work->id = id_priv;
3043 work->id = id_priv;
3055 struct rdma_route *route = &id_priv->id.route;
3107 struct rdma_route *route = &id_priv->id.route;
3121 supported_gids = roce_gid_type_mask_support(id_priv->id.device,
3122 id_priv->id.port_num);
3136 int rdma_set_ib_path(struct rdma_cm_id *id,
3143 id_priv = container_of(id, struct rdma_id_private, id);
3148 id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
3150 if (!id->route.path_rec) {
3155 if (rdma_protocol_roce(id->device, id->port_num)) {
3164 id->route.num_pri_alt_paths = 1;
3168 kfree(id->route.path_rec);
3169 id->route.path_rec = NULL;
3273 struct rdma_route *route = &id_priv->id.route;
3279 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
3305 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3307 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
3310 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
3344 if (rdma_protocol_roce_udp_encap(id_priv->id.device,
3345 id_priv->id.port_num))
3363 int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
3371 id_priv = container_of(id, struct rdma_id_private, id);
3376 if (rdma_cap_ib_sa(id->device, id->port_num))
3378 else if (rdma_protocol_roce(id->device, id->port_num)) {
3383 else if (rdma_protocol_iwarp(id->device, id->port_num))
3460 id_priv->id.route.addr.dev_addr.dev_type =
3464 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3465 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
3466 id_priv->id.port_num = p;
3541 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3542 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
3566 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
3567 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
3576 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
3582 id_priv = container_of(id, struct rdma_id_private, id);
3596 int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
3602 id_priv = container_of(id, struct rdma_id_private, id);
3660 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
3721 struct net *net = id_priv->id.route.addr.dev_addr.net;
3809 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
3823 switch (id_priv->id.ps) {
3828 return id_priv->id.ps;
3846 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
3849 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
3853 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
3911 int rdma_listen(struct rdma_cm_id *id, int backlog)
3914 container_of(id, struct rdma_id_private, id);
3924 ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
3948 if (rdma_cap_ib_cm(id->device, 1)) {
3952 } else if (rdma_cap_iw_cm(id->device, 1)) {
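rdma_listen() at line 3911 binds the id to the wildcard address first if it was never bound (line 3924), then starts an IB or iWARP listener according to the port's capabilities (lines 3948-3952). Passive-side setup, sketched with a placeholder handler and an arbitrary port:

    struct sockaddr_in sin = {
            .sin_family = AF_INET,
            .sin_port   = htons(7174),      /* arbitrary example port */
            .sin_addr   = { .s_addr = htonl(INADDR_ANY) },
    };
    struct rdma_cm_id *listen_id;
    int ret;

    listen_id = rdma_create_id(&init_net, server_handler, NULL,
                               RDMA_PS_TCP, IB_QPT_RC);
    if (IS_ERR(listen_id))
            return PTR_ERR(listen_id);

    ret = rdma_bind_addr(listen_id, (struct sockaddr *)&sin);
    if (!ret)
            ret = rdma_listen(listen_id, 128);      /* backlog */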
3991 ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
3997 ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
4011 struct net *net = id_priv->id.route.addr.dev_addr.net;
4037 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
4041 container_of(id, struct rdma_id_private, id);
4059 id->route.addr.dev_addr.bound_dev_if =
4082 ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
4104 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
4108 container_of(id, struct rdma_id_private, id);
4133 rdma_addr_cancel(&id->route.addr.dev_addr);
4137 &id->route.addr.dev_addr,
4152 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
4155 container_of(id, struct rdma_id_private, id);
4226 ib_init_ah_attr_from_path(id_priv->id.device,
4227 id_priv->id.port_num,
4228 id_priv->id.route.path_rec,
4260 struct ib_cm_id *id;
4289 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
4291 if (IS_ERR(id)) {
4292 ret = PTR_ERR(id);
4295 id_priv->cm_id.ib = id;
4297 req.path = id_priv->id.route.path_rec;
4298 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4299 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4320 struct ib_cm_id *id;
4341 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
4342 if (IS_ERR(id)) {
4343 ret = PTR_ERR(id);
4346 id_priv->cm_id.ib = id;
4348 route = &id_priv->id.route;
4362 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4364 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4366 req.qp_type = id_priv->id.qp_type;
4383 if (ret && !IS_ERR(id)) {
4384 ib_destroy_cm_id(id);
4399 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
4424 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
4440 * @id: Connection identifier to connect.
4446 int rdma_connect_locked(struct rdma_cm_id *id,
4450 container_of(id, struct rdma_id_private, id);
4456 if (!id->qp) {
4461 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4462 if (id->qp_type == IB_QPT_UD)
4466 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4482 * @id: Connection identifier to connect.
4492 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4495 container_of(id, struct rdma_id_private, id);
4499 ret = rdma_connect_locked(id, conn_param);
4507 * @id: Connection identifier to connect.
4513 int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4517 container_of(id, struct rdma_id_private, id);
4522 return rdma_connect(id, conn_param);
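rdma_connect() and its _locked/_ece variants (lines 4446-4522) finish the active side; rdma_connect_ece simply records the ECE parameters and falls through to rdma_connect (line 4522). The canonical client sequence, sketched end to end; dst, pd, qp_attr and client_handler are assumptions, and in real code the later calls run from the handler on the ADDR_RESOLVED and ROUTE_RESOLVED events:

    struct rdma_conn_param param = { .retry_count = 7, .rnr_retry_count = 7 };
    struct rdma_cm_id *id;
    int ret;

    id = rdma_create_id(&init_net, client_handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
    if (IS_ERR(id))
            return PTR_ERR(id);

    ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000 /* ms */);
    /* ...then, from RDMA_CM_EVENT_ADDR_RESOLVED: */
    ret = rdma_resolve_route(id, 2000);
    /* ...then, from RDMA_CM_EVENT_ROUTE_RESOLVED: */
    ret = rdma_create_qp(id, pd, &qp_attr);
    if (!ret)
            ret = rdma_connect(id, &param);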
4577 if (id_priv->id.qp)
4617 * @id: Connection identifier associated with the request.
4627 * state of the qp associated with the id is modified to error, such that any
4633 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4636 container_of(id, struct rdma_id_private, id);
4644 if (!id->qp && conn_param) {
4649 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4650 if (id->qp_type == IB_QPT_UD) {
4665 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4676 rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
4681 int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4685 container_of(id, struct rdma_id_private, id);
4690 return rdma_accept(id, conn_param);
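rdma_accept() must run from the handler for the connect-request event (or with rdma_lock_handler() held, lines 4694-4706 below), and its own error path already sends a reject (line 4676). A sketch of the passive-side handler (setup_qp_for() is a hypothetical helper that allocates pd/cq and calls rdma_create_qp()):

    static int server_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
    {
            struct rdma_conn_param param = {};
            int ret = 0;

            if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                    /* id here is the child id created for this request */
                    ret = setup_qp_for(id);         /* hypothetical helper */
                    if (ret) {
                            rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
                            return ret;     /* nonzero return destroys the child id */
                    }
                    ret = rdma_accept(id, &param);  /* rejects internally on failure */
            }
            return ret;
    }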
4694 void rdma_lock_handler(struct rdma_cm_id *id)
4697 container_of(id, struct rdma_id_private, id);
4703 void rdma_unlock_handler(struct rdma_cm_id *id)
4706 container_of(id, struct rdma_id_private, id);
4712 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
4717 id_priv = container_of(id, struct rdma_id_private, id);
4721 switch (id->device->node_type) {
4733 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
4739 id_priv = container_of(id, struct rdma_id_private, id);
4743 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4744 if (id->qp_type == IB_QPT_UD) {
4752 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4763 int rdma_disconnect(struct rdma_cm_id *id)
4768 id_priv = container_of(id, struct rdma_id_private, id);
4772 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4784 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4814 dev_addr = &id_priv->id.route.addr.dev_addr;
4818 ->default_gid_type[id_priv->id.port_num -
4823 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
4866 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4881 if (id_priv->id.ps == RDMA_PS_UDP)
4886 if (id_priv->id.ps == RDMA_PS_UDP)
4896 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4901 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
4924 if (id_priv->id.ps == RDMA_PS_IPOIB)
4931 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
4932 id_priv->id.port_num, &rec, comp_mask,
4969 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4982 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
5015 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
5023 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
5027 container_of(id, struct rdma_id_private, id);
5032 if (WARN_ON(id->qp))
5036 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
5040 if (id_priv->id.qp_type != IB_QPT_UD)
5052 if (rdma_protocol_roce(id->device, id->port_num)) {
5056 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
5076 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
5081 id_priv = container_of(id, struct rdma_id_private, id);
5089 WARN_ON(id_priv->cma_dev->device != id->device);
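rdma_join_multicast() (line 5023) requires a UD QP type and a bound or resolved id (lines 5032-5040), and rdma_leave_multicast() (line 5076) undoes the join. A sketch; the multicast address, context pointer, and join-state constant are placeholders (the join-state bits live in the SA headers and depend on the use case):

    u8 join_state = 1;      /* e.g. BIT(FULLMEMBER_JOIN); see ib_sa.h */
    int ret;

    ret = rdma_join_multicast(id, (struct sockaddr *)&mcast_addr,
                              join_state, my_mc_context);

    /* later, on teardown: */
    rdma_leave_multicast(id, (struct sockaddr *)&mcast_addr);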
5102 dev_addr = &id_priv->id.route.addr.dev_addr;
5107 pr_info("RDMA CM addr change for ndev %s used by id %p\n",
5108 ndev->name, &id_priv->id);
5114 work->id = id_priv;
5153 container_of(_work, struct rdma_id_private, id.net_work);
5211 if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
5214 INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
5216 queue_work(cma_wq, &current_id->id.net_work);
5422 .id = &cma_pernet_id,