Lines Matching refs:id (only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/core/)


112 	struct rdma_cm_id	id;
157 struct rdma_id_private *id;
270 id_priv->id.device = cma_dev->device;
311 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
329 &id_priv->id.port_num, NULL);
332 id_priv->id.port_num,
333 id_priv->id.ps, dev_addr,
373 return (id_priv->id.device && id_priv->cm_id.ib);
386 id_priv->id.context = context;
387 id_priv->id.event_handler = event_handler;
388 id_priv->id.ps = ps;
398 return &id_priv->id;
408 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
434 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
441 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
448 id_priv = container_of(id, struct rdma_id_private, id);
449 if (id->device != pd->device)
456 if (cma_is_ud_ps(id_priv->id.ps))
463 id->qp = qp;
473 void rdma_destroy_qp(struct rdma_cm_id *id)
475 ib_destroy_qp(id->qp);
479 static int cma_modify_qp_rtr(struct rdma_cm_id *id)
484 if (!id->qp)
489 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
493 ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
498 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
502 return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
505 static int cma_modify_qp_rts(struct rdma_cm_id *id)
510 if (!id->qp)
514 ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
518 return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
521 static int cma_modify_qp_err(struct rdma_cm_id *id)
525 if (!id->qp)
529 return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
535 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
538 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
544 qp_attr->port_num = id_priv->id.port_num;
547 if (cma_is_ud_ps(id_priv->id.ps)) {
557 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
563 id_priv = container_of(id, struct rdma_id_private, id);
564 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
566 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
708 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
721 cma_any_addr(&id_priv->id.route.addr.src_addr);
729 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
771 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
777 if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
815 void rdma_destroy_id(struct rdma_cm_id *id)
820 id_priv = container_of(id, struct rdma_id_private, id);
827 switch (rdma_node_get_transport(id->device->node_type)) {
849 kfree(id_priv->id.route.path_rec);
858 ret = cma_modify_qp_rtr(&id_priv->id);
862 ret = cma_modify_qp_rts(&id_priv->id);
872 cma_modify_qp_err(&id_priv->id);
880 if (id_priv->id.ps == RDMA_PS_SDP &&
922 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
948 cma_modify_qp_err(&id_priv->id);
960 ret = id_priv->id.event_handler(&id_priv->id, &event);
966 rdma_destroy_id(&id_priv->id);
978 struct rdma_cm_id *id;
988 id = rdma_create_id(listen_id->event_handler, listen_id->context,
990 if (IS_ERR(id))
993 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
996 rt = &id->route;
1012 id_priv = container_of(id, struct rdma_id_private, id);
1017 rdma_destroy_id(id);
1026 struct rdma_cm_id *id;
1032 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1034 if (IS_ERR(id))
1042 cma_save_net_info(&id->route.addr, &listen_id->route.addr,
1045 ret = rdma_translate_ip(&id->route.addr.src_addr,
1046 &id->route.addr.dev_addr);
1050 id_priv = container_of(id, struct rdma_id_private, id);
1054 rdma_destroy_id(id);
1084 offset = cma_user_data_offset(listen_id->id.ps);
1086 if (cma_is_ud_ps(listen_id->id.ps)) {
1087 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1092 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1112 ret = conn_id->id.event_handler(&conn_id->id, &event);
1122 rdma_destroy_id(&conn_id->id);
1200 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1202 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1230 ret = id_priv->id.event_handler(&id_priv->id, &event);
1236 rdma_destroy_id(&id_priv->id);
1258 /* Create a new RDMA id for the new IW CM ID */
1259 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1260 listen_id->id.context,
1266 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1277 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1306 ret = conn_id->id.event_handler(&conn_id->id, &event);
1312 rdma_destroy_id(&conn_id->id);
1329 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
1334 addr = &id_priv->id.route.addr.src_addr;
1335 svc_id = cma_get_service_id(id_priv->id.ps, addr);
1339 cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
1356 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
1362 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1375 static int cma_listen_handler(struct rdma_cm_id *id,
1378 struct rdma_id_private *id_priv = id->context;
1380 id->context = id_priv->id.context;
1381 id->event_handler = id_priv->id.event_handler;
1382 return id_priv->id.event_handler(id, event);
1389 struct rdma_cm_id *id;
1392 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
1393 if (IS_ERR(id))
1396 dev_id_priv = container_of(id, struct rdma_id_private, id);
1399 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1400 ip_addr_size(&id_priv->id.route.addr.src_addr));
1405 ret = rdma_listen(id, id_priv->backlog);
1425 static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
1431 return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
1434 int rdma_listen(struct rdma_cm_id *id, int backlog)
1439 id_priv = container_of(id, struct rdma_id_private, id);
1441 ret = cma_bind_any(id, AF_INET);
1450 if (id->device) {
1451 switch (rdma_node_get_transport(id->device->node_type)) {
1483 route = &work->id->id.route;
1501 struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
1511 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1512 id_priv->id.port_num, &path_rec,
1525 struct rdma_id_private *id_priv = work->id;
1532 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1540 rdma_destroy_id(&id_priv->id);
1546 struct rdma_route *route = &id_priv->id.route;
1554 work->id = id_priv;
1579 int rdma_set_ib_paths(struct rdma_cm_id *id,
1585 id_priv = container_of(id, struct rdma_id_private, id);
1589 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
1590 if (!id->route.path_rec) {
1595 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1611 work->id = id_priv;
1620 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1625 id_priv = container_of(id, struct rdma_id_private, id);
1630 switch (rdma_node_get_transport(id->device->node_type)) {
1684 ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1685 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1686 id_priv->id.port_num = p;
1722 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1727 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1731 rdma_destroy_id(&id_priv->id);
1756 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1757 ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1759 if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
1760 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1761 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1766 work->id = id_priv;
1778 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1782 return rdma_bind_addr(id, src_addr);
1784 return cma_bind_any(id, dst_addr->sa_family);
1787 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1793 id_priv = container_of(id, struct rdma_id_private, id);
1795 ret = cma_bind_addr(id, src_addr, dst_addr);
1804 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
1808 ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
1809 dst_addr, &id->route.addr.dev_addr,
1827 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1917 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1930 if (cma_any_addr(&id_priv->id.route.addr.src_addr))
1934 if (cma_any_addr(&cur_id->id.route.addr.src_addr))
1937 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
1951 switch (id_priv->id.ps) {
1969 if (cma_any_port(&id_priv->id.route.addr.src_addr))
1978 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
1986 id_priv = container_of(id, struct rdma_id_private, id);
1991 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2002 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
2082 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2083 id_priv->id.route.path_rec,
2096 ret = id_priv->id.event_handler(&id_priv->id, &event);
2102 rdma_destroy_id(&id_priv->id);
2127 route = &id_priv->id.route;
2128 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2132 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
2140 req.service_id = cma_get_service_id(id_priv->id.ps,
2164 offset = cma_user_data_offset(id_priv->id.ps);
2174 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
2181 route = &id_priv->id.route;
2182 ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2191 req.service_id = cma_get_service_id(id_priv->id.ps,
2225 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
2233 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
2236 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
2239 ret = cma_modify_qp_rtr(&id_priv->id);
2247 if (id_priv->id.qp)
2260 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2265 id_priv = container_of(id, struct rdma_id_private, id);
2269 if (!id->qp) {
2274 switch (rdma_node_get_transport(id->device->node_type)) {
2276 if (cma_is_ud_ps(id->ps))
2305 if (id_priv->id.qp) {
2306 ret = cma_modify_qp_rtr(&id_priv->id);
2317 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
2346 ret = cma_modify_qp_rtr(&id_priv->id);
2354 if (id_priv->id.qp) {
2380 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2385 id_priv = container_of(id, struct rdma_id_private, id);
2389 if (!id->qp && conn_param) {
2394 switch (rdma_node_get_transport(id->device->node_type)) {
2396 if (cma_is_ud_ps(id->ps))
2418 cma_modify_qp_err(id);
2419 rdma_reject(id, NULL, 0);
2424 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2429 id_priv = container_of(id, struct rdma_id_private, id);
2433 switch (id->device->node_type) {
2445 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2451 id_priv = container_of(id, struct rdma_id_private, id);
2455 switch (rdma_node_get_transport(id->device->node_type)) {
2457 if (cma_is_ud_ps(id->ps))
2477 int rdma_disconnect(struct rdma_cm_id *id)
2482 id_priv = container_of(id, struct rdma_id_private, id);
2486 switch (rdma_node_get_transport(id->device->node_type)) {
2488 ret = cma_modify_qp_err(id);
2519 if (!status && id_priv->id.qp)
2520 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
2528 ib_init_ah_from_mcmember(id_priv->id.device,
2529 id_priv->id.port_num, &multicast->rec,
2536 ret = id_priv->id.event_handler(&id_priv->id, &event);
2540 rdma_destroy_id(&id_priv->id);
2552 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2565 if (id_priv->id.ps == RDMA_PS_UDP)
2577 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2582 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
2588 if (id_priv->id.ps == RDMA_PS_UDP)
2600 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
2601 id_priv->id.port_num, &rec,
2610 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
2617 id_priv = container_of(id, struct rdma_id_private, id);
2634 switch (rdma_node_get_transport(id->device->node_type)) {
2653 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
2658 id_priv = container_of(id, struct rdma_id_private, id);
2665 if (id->qp)
2666 ib_detach_mcast(id->qp,
2720 return id_priv->id.event_handler(&id_priv->id, &event);
2745 rdma_destroy_id(&id_priv->id);
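
The lines indexed above are references to the RDMA connection-manager identifier, struct rdma_cm_id, and its private wrapper struct rdma_id_private in the InfiniBand core. For orientation only, below is a minimal, hypothetical sketch of an active-side consumer of this 2.6-era rdma_cm kernel API, assembled from the calls visible in the listing (the three-argument rdma_create_id(), rdma_resolve_addr() and rdma_resolve_route() with millisecond timeouts, rdma_create_qp() against a protection domain, rdma_connect(), and rdma_destroy_id()). It is not code from this directory, and the names example_cm_handler() and example_start_connect() are invented for illustration.

#include <linux/err.h>
#include <linux/socket.h>
#include <rdma/rdma_cm.h>

static int example_cm_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                /* Next step on the active side: rdma_resolve_route(id, timeout_ms). */
                break;
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                /* Allocate a PD and CQs, call rdma_create_qp(id, pd, &init_attr),
                 * then rdma_connect(id, &conn_param). */
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                /* id->qp is connected; work requests may now be posted. */
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                /* Peer went away; the ULP typically queues teardown work that
                 * calls rdma_disconnect() and rdma_destroy_id(), since the id
                 * cannot be destroyed from inside its own event handler. */
                break;
        default:
                break;
        }
        /* Returning non-zero asks the CMA to destroy this id, which is why the
         * listing pairs id.event_handler() calls with rdma_destroy_id(). */
        return 0;
}

static int example_start_connect(struct sockaddr *src, struct sockaddr *dst,
                                 void *context)
{
        struct rdma_cm_id *id;

        /* The port space argument (here RDMA_PS_TCP) is the ps value the
         * listing tests with cma_is_ud_ps() and cma_get_service_id(). */
        id = rdma_create_id(example_cm_handler, context, RDMA_PS_TCP);
        if (IS_ERR(id))
                return PTR_ERR(id);

        /* Asynchronous: the result is reported to example_cm_handler() as
         * RDMA_CM_EVENT_ADDR_RESOLVED or RDMA_CM_EVENT_ADDR_ERROR. */
        return rdma_resolve_addr(id, src, dst, 2000 /* ms */);
}

The passive side follows the same shape with rdma_bind_addr() and rdma_listen(); as the cma_req_handler()/cma_new_conn_id() lines in the listing suggest, an incoming connection request is delivered on a newly created rdma_cm_id, which the listener's event handler then passes to rdma_accept() or rdma_reject().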