Lines Matching refs:id — uses of the rdma_cm_id / rdma_id_private identifier in the RDMA connection manager (cma.c); a short usage sketch follows the listing.

138 struct rdma_cm_id id;
186 struct rdma_id_private *id;
194 struct rdma_id_private *id;
200 struct rdma_id_private *id;
311 id_priv->id.device = cma_dev->device;
312 id_priv->id.route.addr.dev_addr.transport =
346 switch (id_priv->id.ps) {
351 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
352 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
353 id_priv->id.port_num, &rec.mgid,
366 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
375 &id_priv->id.port_num, NULL);
385 &id_priv->id.port_num, NULL);
416 return (id_priv->id.device && id_priv->cm_id.ib);
429 id_priv->id.context = context;
430 id_priv->id.event_handler = event_handler;
431 id_priv->id.ps = ps;
441 return &id_priv->id;
451 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
477 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
484 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
491 id_priv = container_of(id, struct rdma_id_private, id);
492 if (id->device != pd->device)
499 if (cma_is_ud_ps(id_priv->id.ps))
506 id->qp = qp;
516 void rdma_destroy_qp(struct rdma_cm_id *id)
520 id_priv = container_of(id, struct rdma_id_private, id);
522 ib_destroy_qp(id_priv->id.qp);
523 id_priv->id.qp = NULL;
535 if (!id_priv->id.qp) {
542 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
546 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
551 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
557 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
570 if (!id_priv->id.qp) {
576 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
582 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
594 if (!id_priv->id.qp) {
600 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
609 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
613 if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
619 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
624 qp_attr->port_num = id_priv->id.port_num;
627 if (cma_is_ud_ps(id_priv->id.ps)) {
641 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
647 id_priv = container_of(id, struct rdma_id_private, id);
648 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
650 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
798 switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
827 rdma_destroy_id(&dev_id_priv->id);
838 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
844 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
879 switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
893 void rdma_destroy_id(struct rdma_cm_id *id)
898 id_priv = container_of(id, struct rdma_id_private, id);
905 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
928 cma_deref_id(id_priv->id.context);
930 kfree(id_priv->id.route.path_rec);
961 if (id_priv->id.ps == RDMA_PS_SDP &&
1006 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
1046 ret = id_priv->id.event_handler(&id_priv->id, &event);
1052 rdma_destroy_id(&id_priv->id);
1064 struct rdma_cm_id *id;
1075 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1077 if (IS_ERR(id))
1080 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1083 rt = &id->route;
1106 id_priv = container_of(id, struct rdma_id_private, id);
1111 rdma_destroy_id(id);
1120 struct rdma_cm_id *id;
1126 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1128 if (IS_ERR(id))
1136 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1139 if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
1140 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
1141 &id->route.addr.dev_addr);
1146 id_priv = container_of(id, struct rdma_id_private, id);
1150 rdma_destroy_id(id);
1180 offset = cma_user_data_offset(listen_id->id.ps);
1182 if (cma_is_ud_ps(listen_id->id.ps)) {
1183 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1188 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1208 ret = conn_id->id.event_handler(&conn_id->id, &event);
1216 !cma_is_ud_ps(conn_id->id.ps))
1229 rdma_destroy_id(&conn_id->id);
1311 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1313 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1341 ret = id_priv->id.event_handler(&id_priv->id, &event);
1347 rdma_destroy_id(&id_priv->id);
1370 /* Create a new RDMA id for the new IW CM ID */
1371 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1372 listen_id->id.context,
1378 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1389 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1414 ret = ib_query_device(conn_id->id.device, &attr);
1427 ret = conn_id->id.event_handler(&conn_id->id, &event);
1433 rdma_destroy_id(&conn_id->id);
1453 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
1458 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
1459 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1463 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1480 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
1487 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1500 static int cma_listen_handler(struct rdma_cm_id *id,
1503 struct rdma_id_private *id_priv = id->context;
1505 id->context = id_priv->id.context;
1506 id->event_handler = id_priv->id.event_handler;
1507 return id_priv->id.event_handler(id, event);
1514 struct rdma_cm_id *id;
1517 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
1518 if (IS_ERR(id))
1521 dev_id_priv = container_of(id, struct rdma_id_private, id);
1524 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1525 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
1532 ret = rdma_listen(id, id_priv->backlog);
1549 int rdma_listen(struct rdma_cm_id *id, int backlog)
1554 id_priv = container_of(id, struct rdma_id_private, id);
1556 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
1557 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
1566 if (id->device) {
1567 switch (rdma_node_get_transport(id->device->node_type)) {
1593 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
1597 id_priv = container_of(id, struct rdma_id_private, id);
1608 route = &work->id->id.route;
1626 struct rdma_addr *addr = &id_priv->id.route.addr;
1637 path_rec.service_id = cma_get_service_id(id_priv->id.ps,
1658 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1659 id_priv->id.port_num, &path_rec,
1670 struct rdma_id_private *id_priv = work->id;
1677 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1685 rdma_destroy_id(&id_priv->id);
1692 struct rdma_id_private *id_priv = work->id;
1700 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1709 rdma_destroy_id(&id_priv->id);
1715 struct rdma_route *route = &id_priv->id.route;
1723 work->id = id_priv;
1748 int rdma_set_ib_paths(struct rdma_cm_id *id,
1754 id_priv = container_of(id, struct rdma_id_private, id);
1758 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
1759 if (!id->route.path_rec) {
1764 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1780 work->id = id_priv;
1796 struct rdma_route *route = &id_priv->id.route;
1812 work->id = id_priv;
1873 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1878 id_priv = container_of(id, struct rdma_id_private, id);
1883 switch (rdma_node_get_transport(id->device->node_type)) {
1885 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
1946 id_priv->id.route.addr.dev_addr.dev_type =
1950 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1951 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1952 id_priv->id.port_num = p;
1988 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1993 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1997 rdma_destroy_id(&id_priv->id);
2022 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2023 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
2025 src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
2027 dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
2037 work->id = id_priv;
2049 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2053 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
2060 return rdma_bind_addr(id, src_addr);
2067 return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
2071 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2077 id_priv = container_of(id, struct rdma_id_private, id);
2079 ret = cma_bind_addr(id, src_addr, dst_addr);
2088 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
2092 ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
2093 dst_addr, &id->route.addr.dev_addr,
2111 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2207 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2222 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2226 if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
2229 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
2249 (struct sockaddr *) &id_priv->id.route.addr.src_addr,
2250 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
2253 (struct sockaddr *)&id_priv->id.route.addr.src_addr,
2261 size = ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr);
2263 (struct sockaddr *) &id_priv->id.route.addr.src_addr,
2279 switch (id_priv->id.ps) {
2302 if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
2334 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2342 id_priv = container_of(id, struct rdma_id_private, id);
2346 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
2351 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2362 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
2476 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2477 id_priv->id.route.path_rec,
2490 ret = id_priv->id.event_handler(&id_priv->id, &event);
2496 rdma_destroy_id(&id_priv->id);
2521 route = &id_priv->id.route;
2522 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2526 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
2534 req.service_id = cma_get_service_id(id_priv->id.ps,
2558 offset = cma_user_data_offset(id_priv->id.ps);
2568 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
2575 route = &id_priv->id.route;
2576 ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2585 req.service_id = cma_get_service_id(id_priv->id.ps,
2619 cm_id = iw_create_cm_id(id_priv->id.device, id_priv->sock,
2628 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
2631 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
2642 if (id_priv->id.qp)
2655 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2660 id_priv = container_of(id, struct rdma_id_private, id);
2664 if (!id->qp) {
2669 switch (rdma_node_get_transport(id->device->node_type)) {
2671 if (cma_is_ud_ps(id->ps))
2738 if (id_priv->id.qp) {
2768 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2773 id_priv = container_of(id, struct rdma_id_private, id);
2777 if (!id->qp && conn_param) {
2782 switch (rdma_node_get_transport(id->device->node_type)) {
2784 if (cma_is_ud_ps(id->ps))
2807 rdma_reject(id, NULL, 0);
2812 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2817 id_priv = container_of(id, struct rdma_id_private, id);
2821 switch (id->device->node_type) {
2833 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2839 id_priv = container_of(id, struct rdma_id_private, id);
2843 switch (rdma_node_get_transport(id->device->node_type)) {
2845 if (cma_is_ud_ps(id->ps))
2865 int rdma_disconnect(struct rdma_cm_id *id)
2870 id_priv = container_of(id, struct rdma_id_private, id);
2874 switch (rdma_node_get_transport(id->device->node_type)) {
2908 if (!status && id_priv->id.qp)
2909 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
2918 ib_init_ah_from_mcmember(id_priv->id.device,
2919 id_priv->id.port_num, &multicast->rec,
2926 ret = id_priv->id.event_handler(&id_priv->id, &event);
2930 rdma_destroy_id(&id_priv->id);
2943 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2962 if (id_priv->id.ps == RDMA_PS_UDP)
2969 if (id_priv->id.ps == RDMA_PS_UDP)
2980 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2985 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
2991 if (id_priv->id.ps == RDMA_PS_UDP)
3003 if (id_priv->id.ps == RDMA_PS_IPOIB)
3007 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
3008 id_priv->id.port_num, &rec,
3060 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3081 if (id_priv->id.ps == RDMA_PS_UDP)
3104 work->id = id_priv;
3119 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3126 id_priv = container_of(id, struct rdma_id_private, id);
3143 switch (rdma_node_get_transport(id->device->node_type)) {
3145 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
3173 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
3178 id_priv = container_of(id, struct rdma_id_private, id);
3185 if (id->qp)
3186 ib_detach_mcast(id->qp,
3190 switch (rdma_port_get_link_layer(id->device, id->port_num)) {
3214 dev_addr = &id_priv->id.route.addr.dev_addr;
3219 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
3220 ndev->name, &id_priv->id);
3224 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
3225 ndev->if_xname, &id_priv->id);
3232 work->id = id_priv;
3323 ret = id_priv->id.event_handler(&id_priv->id, &event);
3347 rdma_destroy_id(&id_priv->id);
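Every match above reaches the CM identifier through the struct rdma_cm_id embedded as the `id` field of struct rdma_id_private (line 138), which the exported entry points recover with container_of (e.g. lines 491, 898, 2660). For orientation, here is a minimal consumer-side sketch built only from signatures visible in this listing (rdma_create_id argument order at 1075, rdma_bind_addr at 2334, rdma_listen at 1549, rdma_destroy_id at 893); the handler body, the wildcard bind, the backlog of 10, and the RDMA_PS_TCP port space are illustrative assumptions, not taken from the source (the listing itself only shows RDMA_PS_SDP, RDMA_PS_UDP and RDMA_PS_IPOIB).

#include <linux/err.h>
#include <linux/in.h>
#include <rdma/rdma_cm.h>

/* Invoked by the CM through id->event_handler, as at the call sites on
 * lines 1046, 1208 and 1677 above; returning nonzero makes the CM
 * destroy the id, as the rdma_destroy_id() calls following those sites
 * show. */
static int example_handler(struct rdma_cm_id *id,
			   struct rdma_cm_event *event)
{
	return 0;
}

static int example_listen(void)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,	/* wildcard address and port */
	};
	struct rdma_cm_id *id;
	int ret;

	/* (event handler, context, port space), the order seen at 1075;
	 * RDMA_PS_TCP is an assumption for this example */
	id = rdma_create_id(example_handler, NULL, RDMA_PS_TCP);
	if (IS_ERR(id))
		return PTR_ERR(id);

	ret = rdma_bind_addr(id, (struct sockaddr *) &sin);
	if (!ret)
		ret = rdma_listen(id, 10);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}

Incoming connection requests are then delivered as events on fresh ids cloned from the listener with the listener's handler and context (cma_new_conn_id / cma_new_udp_id, lines 1064-1150), which is why the handler invocations above always go through id_priv->id.event_handler.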