/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

static int tavor_quirk = 0;
module_param_named(tavor_quirk, tavor_quirk, int, 0644);
MODULE_PARM_DESC(tavor_quirk, "Tavor performance quirk: limit MTU to 1K if > 0");

int unify_tcp_port_space = 1;
module_param(unify_tcp_port_space, int, 0644);
MODULE_PARM_DESC(unify_tcp_port_space, "Unify the host TCP and RDMA port "
		 "space allocation (default=1)");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define IBOE_PACKET_LIFETIME 18

static int cma_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
module_param_named(cma_response_timeout, cma_response_timeout, int, 0644);
MODULE_PARM_DESC(cma_response_timeout, "CMA_CM_RESPONSE_TIMEOUT default=20");

static int def_prec2sl = 3;
module_param_named(def_prec2sl, def_prec2sl, int, 0644);
MODULE_PARM_DESC(def_prec2sl, "Default value for SL priority with RoCE. Valid values 0 - 7");
Valid values 0 - 7"); 79 80static void cma_add_one(struct ib_device *device); 81static void cma_remove_one(struct ib_device *device); 82 83static struct ib_client cma_client = { 84 .name = "cma", 85 .add = cma_add_one, 86 .remove = cma_remove_one 87}; 88 89static struct ib_sa_client sa_client; 90static struct rdma_addr_client addr_client; 91static LIST_HEAD(dev_list); 92static LIST_HEAD(listen_any_list); 93static DEFINE_MUTEX(lock); 94static struct workqueue_struct *cma_wq; 95static DEFINE_IDR(sdp_ps); 96static DEFINE_IDR(tcp_ps); 97static DEFINE_IDR(udp_ps); 98static DEFINE_IDR(ipoib_ps); 99#if defined(INET) 100static int next_port; 101#endif 102 103struct cma_device { 104 struct list_head list; 105 struct ib_device *device; 106 struct completion comp; 107 atomic_t refcount; 108 struct list_head id_list; 109}; 110 111enum cma_state { 112 CMA_IDLE, 113 CMA_ADDR_QUERY, 114 CMA_ADDR_RESOLVED, 115 CMA_ROUTE_QUERY, 116 CMA_ROUTE_RESOLVED, 117 CMA_CONNECT, 118 CMA_DISCONNECT, 119 CMA_ADDR_BOUND, 120 CMA_LISTEN, 121 CMA_DEVICE_REMOVAL, 122 CMA_DESTROYING 123}; 124 125struct rdma_bind_list { 126 struct idr *ps; 127 struct hlist_head owners; 128 unsigned short port; 129}; 130 131/* 132 * Device removal can occur at anytime, so we need extra handling to 133 * serialize notifying the user of device removal with other callbacks. 134 * We do this by disabling removal notification while a callback is in process, 135 * and reporting it after the callback completes. 136 */ 137struct rdma_id_private { 138 struct rdma_cm_id id; 139 140 struct rdma_bind_list *bind_list; 141 struct socket *sock; 142 struct hlist_node node; 143 struct list_head list; /* listen_any_list or cma_device.list */ 144 struct list_head listen_list; /* per device listens */ 145 struct cma_device *cma_dev; 146 struct list_head mc_list; 147 148 int internal_id; 149 enum cma_state state; 150 spinlock_t lock; 151 struct mutex qp_mutex; 152 153 struct completion comp; 154 atomic_t refcount; 155 struct mutex handler_mutex; 156 157 int backlog; 158 int timeout_ms; 159 struct ib_sa_query *query; 160 int query_id; 161 union { 162 struct ib_cm_id *ib; 163 struct iw_cm_id *iw; 164 } cm_id; 165 166 u32 seq_num; 167 u32 qkey; 168 u32 qp_num; 169 u8 srq; 170 u8 tos; 171}; 172 173struct cma_multicast { 174 struct rdma_id_private *id_priv; 175 union { 176 struct ib_sa_multicast *ib; 177 } multicast; 178 struct list_head list; 179 void *context; 180 struct sockaddr_storage addr; 181 struct kref mcref; 182}; 183 184struct cma_work { 185 struct work_struct work; 186 struct rdma_id_private *id; 187 enum cma_state old_state; 188 enum cma_state new_state; 189 struct rdma_cm_event event; 190}; 191 192struct cma_ndev_work { 193 struct work_struct work; 194 struct rdma_id_private *id; 195 struct rdma_cm_event event; 196}; 197 198struct iboe_mcast_work { 199 struct work_struct work; 200 struct rdma_id_private *id; 201 struct cma_multicast *mc; 202}; 203 204union cma_ip_addr { 205 struct in6_addr ip6; 206 struct { 207 __be32 pad[3]; 208 __be32 addr; 209 } ip4; 210}; 211 212struct cma_hdr { 213 u8 cma_version; 214 u8 ip_version; /* IP version: 7:4 */ 215 __be16 port; 216 union cma_ip_addr src_addr; 217 union cma_ip_addr dst_addr; 218}; 219 220struct sdp_hh { 221 u8 bsdh[16]; 222 u8 sdp_version; /* Major version: 7:4 */ 223 u8 ip_version; /* IP version: 7:4 */ 224 u8 sdp_specific1[10]; 225 __be16 port; 226 __be16 sdp_specific2; 227 union cma_ip_addr src_addr; 228 union cma_ip_addr dst_addr; 229}; 230 231struct sdp_hah { 232 u8 bsdh[16]; 233 u8 sdp_version; 234}; 
#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey)
		return 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
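/*
 * Bind the id to the first cma_device with a port whose cached GID table
 * contains the id's source GID.  RoCE addresses (non-IB dev_type) are
 * matched first via the GID derived from the netdev; otherwise the GID
 * embedded in the hardware address is used.  Callers hold the global lock.
 */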
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND) {
		iboe_addr_get_sgid(dev_addr, &gid);
		list_for_each_entry(cma_dev, &dev_list, list) {
			ret = ib_find_cached_gid(cma_dev->device, &gid,
						 &id_priv->id.port_num, NULL);
			if (!ret)
				goto out;
		}
	}

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret)
			break;
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
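/*
 * Create a QP on the id's device.  UD port spaces take the QP all the
 * way to RTS at create time (cma_init_ud_qp); connected QPs only move
 * to INIT here and transition further during connection establishment.
 */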
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
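/*
 * Fill in QP attributes for the next state transition.  Once a CM id
 * exists, the underlying IB/iWARP CM supplies the attributes; before
 * that, or for UD port spaces, they are derived locally.
 */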
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver = sdp_get_ip_ver(hdr);
		*port = ((struct sdp_hh *) hdr)->port;
		*src = &((struct sdp_hh *) hdr)->src_addr;
		*dst = &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver = cma_get_ip_ver(hdr);
		*port = ((struct cma_hdr *) hdr)->port;
		*src = &((struct cma_hdr *) hdr)->src_addr;
		*dst = &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}
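/*
 * Record the addressing carried in an incoming request on the new id:
 * the header's destination (the address the peer targeted) becomes our
 * source address with the listener's port, and the header's source
 * becomes our destination with the header's port.
 */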
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
		    && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
	if (id_priv->sock)
		sock_release(id_priv->sock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}
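/*
 * Tear down an id: move it to CMA_DESTROYING, cancel whatever operation
 * was in flight for its previous state, destroy the underlying CM id,
 * leave multicast groups, release the bound port, and wait for all
 * references to drop before freeing.
 */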
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
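/*
 * IB CM callback for connected ids: translate IB CM events into RDMA CM
 * events and dispatch them to the user's handler.  A non-zero return
 * from the handler destroys the id.
 */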
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
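/*
 * Build a child id for an incoming connection request: decode the
 * private data header into the new id's addresses and copy the path
 * records (primary, plus alternate if present) from the REQ.
 */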
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
	} else {
		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
					&rt->addr.dev_addr);
		if (ret)
			goto destroy_id;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
					&id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
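/*
 * Listen-side IB CM callback: create a child id for the REQ or SIDR_REQ,
 * bind it to a device, and report a CONNECT_REQUEST event to the user.
 * If the handler accepts, an MRA is sent so the peer does not time out
 * while the connection is being set up.
 */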
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
#ifdef INET6
	struct in6_addr ip6_addr;
#endif

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
#ifdef INET6
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
#endif
	default:
		break;
	}
}
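/*
 * iWARP CM callback for connected ids: iWARP status codes are mapped
 * onto the closest RDMA CM events (reset/refused -> REJECTED, timeout ->
 * UNREACHABLE, anything else -> CONNECT_ERROR).
 */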
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch ((int)iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
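/*
 * Listen-side iWARP CM callback: build a child id, resolve the local
 * netdev and device for the incoming address, and report a
 * CONNECT_REQUEST event carrying the device's RD atomic limits.
 */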
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(NULL, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    id_priv->sock,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}
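/*
 * A listen on a wildcard address is mirrored by an internal child listen
 * on every cma_device; events on those children are funneled back to the
 * user through cma_listen_handler().
 */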
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
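/*
 * Issue the SA path record query for an IB route.  When the tavor_quirk
 * module parameter is set, the query additionally constrains the path
 * MTU to less than 2K (i.e. at most 1K) to work around a Tavor
 * performance issue.
 */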
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	if (tavor_quirk) {
		path_rec.mtu_selector = IB_SA_LT;
		path_rec.mtu = IB_MTU_2048;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

static u8 tos_to_sl(u8 tos)
{
	return def_prec2sl & 7;
}
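/*
 * RoCE (IBoE) routes are constructed locally instead of querying an SA:
 * the GIDs are derived from the netdev MAC/VLAN, and the MTU and rate
 * are taken from the bound net device.  Note that tos_to_sl() above
 * currently ignores its argument and always returns def_prec2sl & 7.
 */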
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
	struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
	struct net_device *ndev = NULL;
	u16 vid;

	if (src_addr->sin_family != dst_addr->sin_family)
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if)
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	vid = rdma_vlan_dev_vlan_id(ndev);

	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);

	route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = tos_to_sl(id_priv->tos);

#ifdef __linux__
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
#else
	route->path_rec->mtu = iboe_get_mtu(ndev->if_mtu);
#endif
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_resolve_ib_route(id_priv, timeout_ms);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ret = cma_resolve_iboe_route(id_priv);
			break;
		default:
			ret = -ENOSYS;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
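/*
 * Bind to a local device for loopback/wildcard resolution: pick the
 * first active port found on any device (falling back to the first
 * device's first port) and take its GID and pkey.
 */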
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
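/*
 * Resolve a loopback destination without a network lookup: bind to a
 * local port if needed, use our own GID as the destination GID, and
 * report ADDR_RESOLVED from the workqueue.
 */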
2041 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2042 queue_work(cma_wq, &work->work);
2043 return 0;
2044err:
2045 kfree(work);
2046 return ret;
2047}
2048
2049static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2050 struct sockaddr *dst_addr)
2051{
2052 if (!src_addr || !src_addr->sa_family) {
2053 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
2054 if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
2055 ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
2056 ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
2057 }
2058 }
2059 if (!cma_any_addr(src_addr))
2060 return rdma_bind_addr(id, src_addr);
2061 else {
2062 struct sockaddr_in addr_in;
2063
2064 memset(&addr_in, 0, sizeof addr_in);
2065 addr_in.sin_family = dst_addr->sa_family;
2066 addr_in.sin_len = sizeof addr_in;
2067 return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
2068 }
2069}
2070
2071int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2072 struct sockaddr *dst_addr, int timeout_ms)
2073{
2074 struct rdma_id_private *id_priv;
2075 int ret;
2076
2077 id_priv = container_of(id, struct rdma_id_private, id);
2078 if (id_priv->state == CMA_IDLE) {
2079 ret = cma_bind_addr(id, src_addr, dst_addr);
2080 if (ret)
2081 return ret;
2082 }
2083
2084 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
2085 return -EINVAL;
2086
2087 atomic_inc(&id_priv->refcount);
2088 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
2089 if (cma_any_addr(dst_addr))
2090 ret = cma_resolve_loopback(id_priv);
2091 else
2092 ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
2093 dst_addr, &id->route.addr.dev_addr,
2094 timeout_ms, addr_handler, id_priv);
2095 if (ret)
2096 goto err;
2097
2098 return 0;
2099err:
2100 cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
2101 cma_deref_id(id_priv);
2102 return ret;
2103}
2104EXPORT_SYMBOL(rdma_resolve_addr);
2105
2106static void cma_bind_port(struct rdma_bind_list *bind_list,
2107 struct rdma_id_private *id_priv)
2108{
2109 struct sockaddr_in *sin;
2110
2111 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2112 sin->sin_port = htons(bind_list->port);
2113 id_priv->bind_list = bind_list;
2114 hlist_add_head(&id_priv->node, &bind_list->owners);
2115}
2116
2117static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
2118 unsigned short snum)
2119{
2120 struct rdma_bind_list *bind_list;
2121 int port, ret;
2122
2123 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
2124 if (!bind_list)
2125 return -ENOMEM;
2126
2127 do {
2128 ret = idr_get_new_above(ps, bind_list, snum, &port);
2129 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
2130
2131 if (ret)
2132 goto err1;
2133
2134 if (port != snum) {
2135 ret = -EADDRNOTAVAIL;
2136 goto err2;
2137 }
2138
2139 bind_list->ps = ps;
2140 bind_list->port = (unsigned short) port;
2141 cma_bind_port(bind_list, id_priv);
2142 return 0;
2143err2:
2144 idr_remove(ps, port);
2145err1:
2146 kfree(bind_list);
2147 return ret;
2148}
2149
2150static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
2151{
2152#if defined(INET)
2153 struct rdma_bind_list *bind_list;
2154 int port, ret, low, high;
2155
2156 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
2157 if (!bind_list)
2158 return -ENOMEM;
2159
2160retry:
2161 /* FIXME: add proper port randomization, as inet_csk_get_port() does */
2162 do {
2163 ret = idr_get_new_above(ps, bind_list, next_port, &port);
2164 } while ((ret ==
-EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 2165 2166 if (ret) 2167 goto err1; 2168 2169 inet_get_local_port_range(&low, &high); 2170 if (port > high) { 2171 if (next_port != low) { 2172 idr_remove(ps, port); 2173 next_port = low; 2174 goto retry; 2175 } 2176 ret = -EADDRNOTAVAIL; 2177 goto err2; 2178 } 2179 2180 if (port == high) 2181 next_port = low; 2182 else 2183 next_port = port + 1; 2184 2185 bind_list->ps = ps; 2186 bind_list->port = (unsigned short) port; 2187 cma_bind_port(bind_list, id_priv); 2188 return 0; 2189err2: 2190 idr_remove(ps, port); 2191err1: 2192 kfree(bind_list); 2193 return ret; 2194#else 2195 return -ENOSPC; 2196#endif 2197} 2198 2199static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2200{ 2201 struct rdma_id_private *cur_id; 2202 struct sockaddr_in *sin, *cur_sin; 2203 struct rdma_bind_list *bind_list; 2204 struct hlist_node *node; 2205 unsigned short snum; 2206 2207 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 2208 snum = ntohs(sin->sin_port); 2209#ifdef __linux__ 2210 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 2211 return -EACCES; 2212#endif 2213 2214 bind_list = idr_find(ps, snum); 2215 if (!bind_list) 2216 return cma_alloc_port(ps, id_priv, snum); 2217 2218 /* 2219 * We don't support binding to any address if anyone is bound to 2220 * a specific address on the same port. 2221 */ 2222 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) 2223 return -EADDRNOTAVAIL; 2224 2225 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { 2226 if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr)) 2227 return -EADDRNOTAVAIL; 2228 2229 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; 2230 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) 2231 return -EADDRINUSE; 2232 } 2233 2234 cma_bind_port(bind_list, id_priv); 2235 return 0; 2236} 2237 2238static int cma_get_tcp_port(struct rdma_id_private *id_priv) 2239{ 2240 int ret; 2241 int size; 2242 struct socket *sock; 2243 2244 ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); 2245 if (ret) 2246 return ret; 2247#ifdef __linux__ 2248 ret = sock->ops->bind(sock, 2249 (struct sockaddr *) &id_priv->id.route.addr.src_addr, 2250 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); 2251#else 2252 ret = -sobind(sock, 2253 (struct sockaddr *)&id_priv->id.route.addr.src_addr, 2254 curthread); 2255#endif 2256 if (ret) { 2257 sock_release(sock); 2258 return ret; 2259 } 2260 2261 size = ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr); 2262 ret = sock_getname(sock, 2263 (struct sockaddr *) &id_priv->id.route.addr.src_addr, 2264 &size, 0); 2265 if (ret) { 2266 sock_release(sock); 2267 return ret; 2268 } 2269 2270 id_priv->sock = sock; 2271 return 0; 2272} 2273 2274static int cma_get_port(struct rdma_id_private *id_priv) 2275{ 2276 struct idr *ps; 2277 int ret; 2278 2279 switch (id_priv->id.ps) { 2280 case RDMA_PS_SDP: 2281 ps = &sdp_ps; 2282 break; 2283 case RDMA_PS_TCP: 2284 ps = &tcp_ps; 2285 if (unify_tcp_port_space) { 2286 ret = cma_get_tcp_port(id_priv); 2287 if (ret) 2288 goto out; 2289 } 2290 break; 2291 case RDMA_PS_UDP: 2292 ps = &udp_ps; 2293 break; 2294 case RDMA_PS_IPOIB: 2295 ps = &ipoib_ps; 2296 break; 2297 default: 2298 return -EPROTONOSUPPORT; 2299 } 2300 2301 mutex_lock(&lock); 2302 if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)) 2303 ret = cma_alloc_any_port(ps, id_priv); 2304 else 2305 ret = cma_use_port(ps, id_priv); 2306 
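	/*
	 * Each port space idr (sdp_ps, tcp_ps, udp_ps, ipoib_ps) maps a port
	 * number to a single rdma_bind_list, and every id bound to that port
	 * is chained on bind_list->owners; both paths above run under the
	 * global cma mutex ("lock").  Using the same idr/hlist APIs seen
	 * above, walking the ids that share a TCP port looks like:
	 *
	 *    struct rdma_bind_list *bl = idr_find(&tcp_ps, port);
	 *    struct rdma_id_private *cur;
	 *    struct hlist_node *n;
	 *
	 *    if (bl)
	 *        hlist_for_each_entry(cur, n, &bl->owners, node)
	 *            ;    // each cur is bound to bl->port
	 */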
mutex_unlock(&lock); 2307out: 2308 return ret; 2309} 2310 2311static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 2312 struct sockaddr *addr) 2313{ 2314#if defined(INET6) 2315 struct sockaddr_in6 *sin6; 2316 2317 if (addr->sa_family != AF_INET6) 2318 return 0; 2319 2320 sin6 = (struct sockaddr_in6 *) addr; 2321#ifdef __linux__ 2322 if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 2323#else 2324 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) && 2325#endif 2326 !sin6->sin6_scope_id) 2327 return -EINVAL; 2328 2329 dev_addr->bound_dev_if = sin6->sin6_scope_id; 2330#endif 2331 return 0; 2332} 2333 2334int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 2335{ 2336 struct rdma_id_private *id_priv; 2337 int ret; 2338 2339 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6) 2340 return -EAFNOSUPPORT; 2341 2342 id_priv = container_of(id, struct rdma_id_private, id); 2343 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) 2344 return -EINVAL; 2345 2346 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 2347 if (ret) 2348 goto err1; 2349 2350 if (!cma_any_addr(addr)) { 2351 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2352 if (ret) 2353 goto err1; 2354 2355 mutex_lock(&lock); 2356 ret = cma_acquire_dev(id_priv); 2357 mutex_unlock(&lock); 2358 if (ret) 2359 goto err1; 2360 } 2361 2362 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr)); 2363 ret = cma_get_port(id_priv); 2364 if (ret) 2365 goto err2; 2366 2367 return 0; 2368err2: 2369 if (id_priv->cma_dev) { 2370 mutex_lock(&lock); 2371 cma_detach_from_dev(id_priv); 2372 mutex_unlock(&lock); 2373 } 2374err1: 2375 cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); 2376 return ret; 2377} 2378EXPORT_SYMBOL(rdma_bind_addr); 2379 2380static int cma_format_hdr(void *hdr, enum rdma_port_space ps, 2381 struct rdma_route *route) 2382{ 2383 struct cma_hdr *cma_hdr; 2384 struct sdp_hh *sdp_hdr; 2385 2386 if (route->addr.src_addr.ss_family == AF_INET) { 2387 struct sockaddr_in *src4, *dst4; 2388 2389 src4 = (struct sockaddr_in *) &route->addr.src_addr; 2390 dst4 = (struct sockaddr_in *) &route->addr.dst_addr; 2391 2392 switch (ps) { 2393 case RDMA_PS_SDP: 2394 sdp_hdr = hdr; 2395 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) 2396 return -EINVAL; 2397 sdp_set_ip_ver(sdp_hdr, 4); 2398 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2399 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2400 sdp_hdr->port = src4->sin_port; 2401 break; 2402 default: 2403 cma_hdr = hdr; 2404 cma_hdr->cma_version = CMA_VERSION; 2405 cma_set_ip_ver(cma_hdr, 4); 2406 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2407 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2408 cma_hdr->port = src4->sin_port; 2409 break; 2410 } 2411 } else { 2412 struct sockaddr_in6 *src6, *dst6; 2413 2414 src6 = (struct sockaddr_in6 *) &route->addr.src_addr; 2415 dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr; 2416 2417 switch (ps) { 2418 case RDMA_PS_SDP: 2419 sdp_hdr = hdr; 2420 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) 2421 return -EINVAL; 2422 sdp_set_ip_ver(sdp_hdr, 6); 2423 sdp_hdr->src_addr.ip6 = src6->sin6_addr; 2424 sdp_hdr->dst_addr.ip6 = dst6->sin6_addr; 2425 sdp_hdr->port = src6->sin6_port; 2426 break; 2427 default: 2428 cma_hdr = hdr; 2429 cma_hdr->cma_version = CMA_VERSION; 2430 cma_set_ip_ver(cma_hdr, 6); 2431 cma_hdr->src_addr.ip6 = src6->sin6_addr; 2432 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 2433 cma_hdr->port = src6->sin6_port; 2434 break; 2435 } 2436 } 2437 return 
0; 2438} 2439 2440static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 2441 struct ib_cm_event *ib_event) 2442{ 2443 struct rdma_id_private *id_priv = cm_id->context; 2444 struct rdma_cm_event event; 2445 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 2446 int ret = 0; 2447 2448 if (cma_disable_callback(id_priv, CMA_CONNECT)) 2449 return 0; 2450 2451 memset(&event, 0, sizeof event); 2452 switch (ib_event->event) { 2453 case IB_CM_SIDR_REQ_ERROR: 2454 event.event = RDMA_CM_EVENT_UNREACHABLE; 2455 event.status = -ETIMEDOUT; 2456 break; 2457 case IB_CM_SIDR_REP_RECEIVED: 2458 event.param.ud.private_data = ib_event->private_data; 2459 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 2460 if (rep->status != IB_SIDR_SUCCESS) { 2461 event.event = RDMA_CM_EVENT_UNREACHABLE; 2462 event.status = ib_event->param.sidr_rep_rcvd.status; 2463 break; 2464 } 2465 ret = cma_set_qkey(id_priv); 2466 if (ret) { 2467 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2468 event.status = -EINVAL; 2469 break; 2470 } 2471 if (id_priv->qkey != rep->qkey) { 2472 event.event = RDMA_CM_EVENT_UNREACHABLE; 2473 event.status = -EINVAL; 2474 break; 2475 } 2476 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 2477 id_priv->id.route.path_rec, 2478 &event.param.ud.ah_attr); 2479 event.param.ud.qp_num = rep->qpn; 2480 event.param.ud.qkey = rep->qkey; 2481 event.event = RDMA_CM_EVENT_ESTABLISHED; 2482 event.status = 0; 2483 break; 2484 default: 2485 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", 2486 ib_event->event); 2487 goto out; 2488 } 2489 2490 ret = id_priv->id.event_handler(&id_priv->id, &event); 2491 if (ret) { 2492 /* Destroy the CM ID by returning a non-zero value. */ 2493 id_priv->cm_id.ib = NULL; 2494 cma_exch(id_priv, CMA_DESTROYING); 2495 mutex_unlock(&id_priv->handler_mutex); 2496 rdma_destroy_id(&id_priv->id); 2497 return ret; 2498 } 2499out: 2500 mutex_unlock(&id_priv->handler_mutex); 2501 return ret; 2502} 2503 2504static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 2505 struct rdma_conn_param *conn_param) 2506{ 2507 struct ib_cm_sidr_req_param req; 2508 struct rdma_route *route; 2509 int ret; 2510 2511 req.private_data_len = sizeof(struct cma_hdr) + 2512 conn_param->private_data_len; 2513 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2514 if (!req.private_data) 2515 return -ENOMEM; 2516 2517 if (conn_param->private_data && conn_param->private_data_len) 2518 memcpy((void *) req.private_data + sizeof(struct cma_hdr), 2519 conn_param->private_data, conn_param->private_data_len); 2520 2521 route = &id_priv->id.route; 2522 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route); 2523 if (ret) 2524 goto out; 2525 2526 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, 2527 cma_sidr_rep_handler, id_priv); 2528 if (IS_ERR(id_priv->cm_id.ib)) { 2529 ret = PTR_ERR(id_priv->cm_id.ib); 2530 goto out; 2531 } 2532 2533 req.path = route->path_rec; 2534 req.service_id = cma_get_service_id(id_priv->id.ps, 2535 (struct sockaddr *) &route->addr.dst_addr); 2536 req.timeout_ms = 1 << (cma_response_timeout - 8); 2537 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2538 2539 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 2540 if (ret) { 2541 ib_destroy_cm_id(id_priv->cm_id.ib); 2542 id_priv->cm_id.ib = NULL; 2543 } 2544out: 2545 kfree(req.private_data); 2546 return ret; 2547} 2548 2549static int cma_connect_ib(struct rdma_id_private *id_priv, 2550 struct rdma_conn_param *conn_param) 2551{ 2552 struct ib_cm_req_param req; 2553 
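	/*
	 * The REQ private data assembled below is the consumer's buffer
	 * prefixed by a CMA header, which is how the passive side recovers
	 * the IP addresses and port from the CM MAD.  For the non-SDP port
	 * spaces the layout is sketched below (SDP consumers supply their
	 * own hello header, so their cma_user_data_offset() is 0 and
	 * cma_format_hdr() fills the IP fields inside that header instead):
	 *
	 *    0                              cma_user_data_offset(ps)
	 *    +------------------------------+----------------------------+
	 *    | struct cma_hdr: cma_version, | conn_param->private_data   |
	 *    | ip_version, port, src, dst   | (private_data_len bytes)   |
	 *    +------------------------------+----------------------------+
	 */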
struct rdma_route *route; 2554 void *private_data; 2555 int offset, ret; 2556 2557 memset(&req, 0, sizeof req); 2558 offset = cma_user_data_offset(id_priv->id.ps); 2559 req.private_data_len = offset + conn_param->private_data_len; 2560 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2561 if (!private_data) 2562 return -ENOMEM; 2563 2564 if (conn_param->private_data && conn_param->private_data_len) 2565 memcpy(private_data + offset, conn_param->private_data, 2566 conn_param->private_data_len); 2567 2568 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler, 2569 id_priv); 2570 if (IS_ERR(id_priv->cm_id.ib)) { 2571 ret = PTR_ERR(id_priv->cm_id.ib); 2572 goto out; 2573 } 2574 2575 route = &id_priv->id.route; 2576 ret = cma_format_hdr(private_data, id_priv->id.ps, route); 2577 if (ret) 2578 goto out; 2579 req.private_data = private_data; 2580 2581 req.primary_path = &route->path_rec[0]; 2582 if (route->num_paths == 2) 2583 req.alternate_path = &route->path_rec[1]; 2584 2585 req.service_id = cma_get_service_id(id_priv->id.ps, 2586 (struct sockaddr *) &route->addr.dst_addr); 2587 req.qp_num = id_priv->qp_num; 2588 req.qp_type = IB_QPT_RC; 2589 req.starting_psn = id_priv->seq_num; 2590 req.responder_resources = conn_param->responder_resources; 2591 req.initiator_depth = conn_param->initiator_depth; 2592 req.flow_control = conn_param->flow_control; 2593 req.retry_count = conn_param->retry_count; 2594 req.rnr_retry_count = conn_param->rnr_retry_count; 2595 req.remote_cm_response_timeout = cma_response_timeout; 2596 req.local_cm_response_timeout = cma_response_timeout; 2597 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2598 req.srq = id_priv->srq ? 1 : 0; 2599 2600 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 2601out: 2602 if (ret && !IS_ERR(id_priv->cm_id.ib)) { 2603 ib_destroy_cm_id(id_priv->cm_id.ib); 2604 id_priv->cm_id.ib = NULL; 2605 } 2606 2607 kfree(private_data); 2608 return ret; 2609} 2610 2611static int cma_connect_iw(struct rdma_id_private *id_priv, 2612 struct rdma_conn_param *conn_param) 2613{ 2614 struct iw_cm_id *cm_id; 2615 struct sockaddr_in* sin; 2616 int ret; 2617 struct iw_cm_conn_param iw_param; 2618 2619 cm_id = iw_create_cm_id(id_priv->id.device, id_priv->sock, 2620 cma_iw_handler, id_priv); 2621 if (IS_ERR(cm_id)) { 2622 ret = PTR_ERR(cm_id); 2623 goto out; 2624 } 2625 2626 id_priv->cm_id.iw = cm_id; 2627 2628 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr; 2629 cm_id->local_addr = *sin; 2630 2631 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; 2632 cm_id->remote_addr = *sin; 2633 2634 ret = cma_modify_qp_rtr(id_priv, conn_param); 2635 if (ret) 2636 goto out; 2637 2638 iw_param.ord = conn_param->initiator_depth; 2639 iw_param.ird = conn_param->responder_resources; 2640 iw_param.private_data = conn_param->private_data; 2641 iw_param.private_data_len = conn_param->private_data_len; 2642 if (id_priv->id.qp) 2643 iw_param.qpn = id_priv->qp_num; 2644 else 2645 iw_param.qpn = conn_param->qp_num; 2646 ret = iw_cm_connect(cm_id, &iw_param); 2647out: 2648 if (ret && !IS_ERR(cm_id)) { 2649 iw_destroy_cm_id(cm_id); 2650 id_priv->cm_id.iw = NULL; 2651 } 2652 return ret; 2653} 2654 2655int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2656{ 2657 struct rdma_id_private *id_priv; 2658 int ret; 2659 2660 id_priv = container_of(id, struct rdma_id_private, id); 2661 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) 2662 return -EINVAL; 2663 2664 if (!id->qp) { 2665 id_priv->qp_num = conn_param->qp_num; 
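		/*
		 * No QP is attached to this id, so the consumer owns the QP;
		 * only its number and SRQ flag are recorded here to be
		 * echoed in the CM exchange.  A typical caller-managed setup
		 * (field values are illustrative only):
		 *
		 *    struct rdma_conn_param param = {
		 *        .responder_resources = 1,
		 *        .initiator_depth     = 1,
		 *        .retry_count         = 7,
		 *        .rnr_retry_count     = 7,
		 *        .qp_num              = my_qp->qp_num,
		 *    };
		 *    ret = rdma_connect(id, &param);
		 */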
2666 id_priv->srq = conn_param->srq;
2667 }
2668
2669 switch (rdma_node_get_transport(id->device->node_type)) {
2670 case RDMA_TRANSPORT_IB:
2671 if (cma_is_ud_ps(id->ps))
2672 ret = cma_resolve_ib_udp(id_priv, conn_param);
2673 else
2674 ret = cma_connect_ib(id_priv, conn_param);
2675 break;
2676 case RDMA_TRANSPORT_IWARP:
2677 ret = cma_connect_iw(id_priv, conn_param);
2678 break;
2679 default:
2680 ret = -ENOSYS;
2681 break;
2682 }
2683 if (ret)
2684 goto err;
2685
2686 return 0;
2687err:
2688 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
2689 return ret;
2690}
2691EXPORT_SYMBOL(rdma_connect);
2692
2693static int cma_accept_ib(struct rdma_id_private *id_priv,
2694 struct rdma_conn_param *conn_param)
2695{
2696 struct ib_cm_rep_param rep;
2697 int ret;
2698
2699 ret = cma_modify_qp_rtr(id_priv, conn_param);
2700 if (ret)
2701 goto out;
2702
2703 ret = cma_modify_qp_rts(id_priv, conn_param);
2704 if (ret)
2705 goto out;
2706
2707 memset(&rep, 0, sizeof rep);
2708 rep.qp_num = id_priv->qp_num;
2709 rep.starting_psn = id_priv->seq_num;
2710 rep.private_data = conn_param->private_data;
2711 rep.private_data_len = conn_param->private_data_len;
2712 rep.responder_resources = conn_param->responder_resources;
2713 rep.initiator_depth = conn_param->initiator_depth;
2714 rep.failover_accepted = 0;
2715 rep.flow_control = conn_param->flow_control;
2716 rep.rnr_retry_count = conn_param->rnr_retry_count;
2717 rep.srq = id_priv->srq ? 1 : 0;
2718
2719 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2720out:
2721 return ret;
2722}
2723
2724static int cma_accept_iw(struct rdma_id_private *id_priv,
2725 struct rdma_conn_param *conn_param)
2726{
2727 struct iw_cm_conn_param iw_param;
2728 int ret;
2729
2730 ret = cma_modify_qp_rtr(id_priv, conn_param);
2731 if (ret)
2732 return ret;
2733
2734 iw_param.ord = conn_param->initiator_depth;
2735 iw_param.ird = conn_param->responder_resources;
2736 iw_param.private_data = conn_param->private_data;
2737 iw_param.private_data_len = conn_param->private_data_len;
2738 if (id_priv->id.qp) {
2739 iw_param.qpn = id_priv->qp_num;
2740 } else
2741 iw_param.qpn = conn_param->qp_num;
2742
2743 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2744}
2745
2746static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2747 enum ib_cm_sidr_status status,
2748 const void *private_data, int private_data_len)
2749{
2750 struct ib_cm_sidr_rep_param rep;
2751 int ret;
2752
2753 memset(&rep, 0, sizeof rep);
2754 rep.status = status;
2755 if (status == IB_SIDR_SUCCESS) {
2756 ret = cma_set_qkey(id_priv);
2757 if (ret)
2758 return ret;
2759 rep.qp_num = id_priv->qp_num;
2760 rep.qkey = id_priv->qkey;
2761 }
2762 rep.private_data = private_data;
2763 rep.private_data_len = private_data_len;
2764
2765 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2766}
2767
2768int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2769{
2770 struct rdma_id_private *id_priv;
2771 int ret;
2772
2773 id_priv = container_of(id, struct rdma_id_private, id);
2774 if (!cma_comp(id_priv, CMA_CONNECT))
2775 return -EINVAL;
2776
2777 if (!id->qp && conn_param) {
2778 id_priv->qp_num = conn_param->qp_num;
2779 id_priv->srq = conn_param->srq;
2780 }
2781
2782 switch (rdma_node_get_transport(id->device->node_type)) {
2783 case RDMA_TRANSPORT_IB:
2784 if (cma_is_ud_ps(id->ps)) /* conn_param may be NULL here */
2785 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2786 conn_param ? conn_param->private_data : NULL,
2787 conn_param ? conn_param->private_data_len : 0);
2788 else if (conn_param)
2789 ret = cma_accept_ib(id_priv, conn_param);
2790
else 2791 ret = cma_rep_recv(id_priv); 2792 break; 2793 case RDMA_TRANSPORT_IWARP: 2794 ret = cma_accept_iw(id_priv, conn_param); 2795 break; 2796 default: 2797 ret = -ENOSYS; 2798 break; 2799 } 2800 2801 if (ret) 2802 goto reject; 2803 2804 return 0; 2805reject: 2806 cma_modify_qp_err(id_priv); 2807 rdma_reject(id, NULL, 0); 2808 return ret; 2809} 2810EXPORT_SYMBOL(rdma_accept); 2811 2812int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 2813{ 2814 struct rdma_id_private *id_priv; 2815 int ret; 2816 2817 id_priv = container_of(id, struct rdma_id_private, id); 2818 if (!cma_has_cm_dev(id_priv)) 2819 return -EINVAL; 2820 2821 switch (id->device->node_type) { 2822 case RDMA_NODE_IB_CA: 2823 ret = ib_cm_notify(id_priv->cm_id.ib, event); 2824 break; 2825 default: 2826 ret = 0; 2827 break; 2828 } 2829 return ret; 2830} 2831EXPORT_SYMBOL(rdma_notify); 2832 2833int rdma_reject(struct rdma_cm_id *id, const void *private_data, 2834 u8 private_data_len) 2835{ 2836 struct rdma_id_private *id_priv; 2837 int ret; 2838 2839 id_priv = container_of(id, struct rdma_id_private, id); 2840 if (!cma_has_cm_dev(id_priv)) 2841 return -EINVAL; 2842 2843 switch (rdma_node_get_transport(id->device->node_type)) { 2844 case RDMA_TRANSPORT_IB: 2845 if (cma_is_ud_ps(id->ps)) 2846 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2847 private_data, private_data_len); 2848 else 2849 ret = ib_send_cm_rej(id_priv->cm_id.ib, 2850 IB_CM_REJ_CONSUMER_DEFINED, NULL, 2851 0, private_data, private_data_len); 2852 break; 2853 case RDMA_TRANSPORT_IWARP: 2854 ret = iw_cm_reject(id_priv->cm_id.iw, 2855 private_data, private_data_len); 2856 break; 2857 default: 2858 ret = -ENOSYS; 2859 break; 2860 } 2861 return ret; 2862} 2863EXPORT_SYMBOL(rdma_reject); 2864 2865int rdma_disconnect(struct rdma_cm_id *id) 2866{ 2867 struct rdma_id_private *id_priv; 2868 int ret; 2869 2870 id_priv = container_of(id, struct rdma_id_private, id); 2871 if (!cma_has_cm_dev(id_priv)) 2872 return -EINVAL; 2873 2874 switch (rdma_node_get_transport(id->device->node_type)) { 2875 case RDMA_TRANSPORT_IB: 2876 ret = cma_modify_qp_err(id_priv); 2877 if (ret) 2878 goto out; 2879 /* Initiate or respond to a disconnect. 
*/ 2880 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 2881 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 2882 break; 2883 case RDMA_TRANSPORT_IWARP: 2884 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 2885 break; 2886 default: 2887 ret = -EINVAL; 2888 break; 2889 } 2890out: 2891 return ret; 2892} 2893EXPORT_SYMBOL(rdma_disconnect); 2894 2895static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 2896{ 2897 struct rdma_id_private *id_priv; 2898 struct cma_multicast *mc = multicast->context; 2899 struct rdma_cm_event event; 2900 int ret; 2901 2902 id_priv = mc->id_priv; 2903 if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && 2904 cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) 2905 return 0; 2906 2907 mutex_lock(&id_priv->qp_mutex); 2908 if (!status && id_priv->id.qp) 2909 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 2910 multicast->rec.mlid); 2911 mutex_unlock(&id_priv->qp_mutex); 2912 2913 memset(&event, 0, sizeof event); 2914 event.status = status; 2915 event.param.ud.private_data = mc->context; 2916 if (!status) { 2917 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 2918 ib_init_ah_from_mcmember(id_priv->id.device, 2919 id_priv->id.port_num, &multicast->rec, 2920 &event.param.ud.ah_attr); 2921 event.param.ud.qp_num = 0xFFFFFF; 2922 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 2923 } else 2924 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 2925 2926 ret = id_priv->id.event_handler(&id_priv->id, &event); 2927 if (ret) { 2928 cma_exch(id_priv, CMA_DESTROYING); 2929 mutex_unlock(&id_priv->handler_mutex); 2930 rdma_destroy_id(&id_priv->id); 2931 return 0; 2932 } 2933 2934 mutex_unlock(&id_priv->handler_mutex); 2935 return 0; 2936} 2937 2938static void cma_set_mgid(struct rdma_id_private *id_priv, 2939 struct sockaddr *addr, union ib_gid *mgid) 2940{ 2941#if defined(INET) || defined(INET6) 2942 unsigned char mc_map[MAX_ADDR_LEN]; 2943 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2944#endif 2945#ifdef INET 2946 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 2947#endif 2948#ifdef INET6 2949 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 2950#endif 2951 2952 if (cma_any_addr(addr)) { 2953 memset(mgid, 0, sizeof *mgid); 2954#ifdef INET6 2955 } else if ((addr->sa_family == AF_INET6) && 2956 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 2957 0xFF10A01B)) { 2958 /* IPv6 address is an SA assigned MGID. 
*/ 2959 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 2960 } else if (addr->sa_family == AF_INET6) { 2961 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 2962 if (id_priv->id.ps == RDMA_PS_UDP) 2963 mc_map[7] = 0x01; /* Use RDMA CM signature */ 2964 *mgid = *(union ib_gid *) (mc_map + 4); 2965#endif 2966#ifdef INET 2967 } else { 2968 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 2969 if (id_priv->id.ps == RDMA_PS_UDP) 2970 mc_map[7] = 0x01; /* Use RDMA CM signature */ 2971 *mgid = *(union ib_gid *) (mc_map + 4); 2972#endif 2973 } 2974} 2975 2976static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 2977 struct cma_multicast *mc) 2978{ 2979 struct ib_sa_mcmember_rec rec; 2980 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2981 ib_sa_comp_mask comp_mask; 2982 int ret; 2983 2984 ib_addr_get_mgid(dev_addr, &rec.mgid); 2985 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 2986 &rec.mgid, &rec); 2987 if (ret) 2988 return ret; 2989 2990 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 2991 if (id_priv->id.ps == RDMA_PS_UDP) 2992 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 2993 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 2994 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2995 rec.join_state = 1; 2996 2997 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 2998 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 2999 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 3000 IB_SA_MCMEMBER_REC_FLOW_LABEL | 3001 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 3002 3003 if (id_priv->id.ps == RDMA_PS_IPOIB) 3004 comp_mask |= IB_SA_MCMEMBER_REC_RATE | 3005 IB_SA_MCMEMBER_REC_RATE_SELECTOR; 3006 3007 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, 3008 id_priv->id.port_num, &rec, 3009 comp_mask, GFP_KERNEL, 3010 cma_ib_mc_handler, mc); 3011 if (IS_ERR(mc->multicast.ib)) 3012 return PTR_ERR(mc->multicast.ib); 3013 3014 return 0; 3015} 3016 3017 3018static void iboe_mcast_work_handler(struct work_struct *work) 3019{ 3020 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); 3021 struct cma_multicast *mc = mw->mc; 3022 struct ib_sa_multicast *m = mc->multicast.ib; 3023 3024 mc->multicast.ib->context = mc; 3025 cma_ib_mc_handler(0, m); 3026 kref_put(&mc->mcref, release_mc); 3027 kfree(mw); 3028} 3029 3030static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid) 3031{ 3032 struct sockaddr_in *sin = (struct sockaddr_in *)addr; 3033 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 3034 3035 if (cma_any_addr(addr)) { 3036 memset(mgid, 0, sizeof *mgid); 3037 } else if (addr->sa_family == AF_INET6) 3038 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3039 else { 3040 mgid->raw[0] = 0xff; 3041 mgid->raw[1] = 0x0e; 3042 mgid->raw[2] = 0; 3043 mgid->raw[3] = 0; 3044 mgid->raw[4] = 0; 3045 mgid->raw[5] = 0; 3046 mgid->raw[6] = 0; 3047 mgid->raw[7] = 0; 3048 mgid->raw[8] = 0; 3049 mgid->raw[9] = 0; 3050 mgid->raw[10] = 0xff; 3051 mgid->raw[11] = 0xff; 3052 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; 3053 } 3054} 3055 3056static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, 3057 struct cma_multicast *mc) 3058{ 3059 struct iboe_mcast_work *work; 3060 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3061 int err; 3062 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 3063 struct net_device *ndev = NULL; 3064 3065 if (cma_zero_addr((struct sockaddr *)&mc->addr)) 3066 return -EINVAL; 3067 3068 work = 
kzalloc(sizeof *work, GFP_KERNEL); 3069 if (!work) 3070 return -ENOMEM; 3071 3072 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); 3073 if (!mc->multicast.ib) { 3074 err = -ENOMEM; 3075 goto out1; 3076 } 3077 3078 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid); 3079 3080 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); 3081 if (id_priv->id.ps == RDMA_PS_UDP) 3082 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 3083 3084 if (dev_addr->bound_dev_if) 3085 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3086 if (!ndev) { 3087 err = -ENODEV; 3088 goto out2; 3089 } 3090 3091 mc->multicast.ib->rec.rate = iboe_get_rate(ndev); 3092 mc->multicast.ib->rec.hop_limit = 1; 3093#ifdef __linux__ 3094 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu); 3095#else 3096 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->if_mtu); 3097#endif 3098 dev_put(ndev); 3099 if (!mc->multicast.ib->rec.mtu) { 3100 err = -EINVAL; 3101 goto out2; 3102 } 3103 iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid); 3104 work->id = id_priv; 3105 work->mc = mc; 3106 INIT_WORK(&work->work, iboe_mcast_work_handler); 3107 kref_get(&mc->mcref); 3108 queue_work(cma_wq, &work->work); 3109 3110 return 0; 3111 3112out2: 3113 kfree(mc->multicast.ib); 3114out1: 3115 kfree(work); 3116 return err; 3117} 3118 3119int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 3120 void *context) 3121{ 3122 struct rdma_id_private *id_priv; 3123 struct cma_multicast *mc; 3124 int ret; 3125 3126 id_priv = container_of(id, struct rdma_id_private, id); 3127 if (!cma_comp(id_priv, CMA_ADDR_BOUND) && 3128 !cma_comp(id_priv, CMA_ADDR_RESOLVED)) 3129 return -EINVAL; 3130 3131 mc = kmalloc(sizeof *mc, GFP_KERNEL); 3132 if (!mc) 3133 return -ENOMEM; 3134 3135 memcpy(&mc->addr, addr, ip_addr_size(addr)); 3136 mc->context = context; 3137 mc->id_priv = id_priv; 3138 3139 spin_lock(&id_priv->lock); 3140 list_add(&mc->list, &id_priv->mc_list); 3141 spin_unlock(&id_priv->lock); 3142 3143 switch (rdma_node_get_transport(id->device->node_type)) { 3144 case RDMA_TRANSPORT_IB: 3145 switch (rdma_port_get_link_layer(id->device, id->port_num)) { 3146 case IB_LINK_LAYER_INFINIBAND: 3147 ret = cma_join_ib_multicast(id_priv, mc); 3148 break; 3149 case IB_LINK_LAYER_ETHERNET: 3150 kref_init(&mc->mcref); 3151 ret = cma_iboe_join_multicast(id_priv, mc); 3152 break; 3153 default: 3154 ret = -EINVAL; 3155 } 3156 break; 3157 default: 3158 ret = -ENOSYS; 3159 break; 3160 } 3161 3162 if (ret) { 3163 spin_lock_irq(&id_priv->lock); 3164 list_del(&mc->list); 3165 spin_unlock_irq(&id_priv->lock); 3166 kfree(mc); 3167 } 3168 3169 return ret; 3170} 3171EXPORT_SYMBOL(rdma_join_multicast); 3172 3173void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 3174{ 3175 struct rdma_id_private *id_priv; 3176 struct cma_multicast *mc; 3177 3178 id_priv = container_of(id, struct rdma_id_private, id); 3179 spin_lock_irq(&id_priv->lock); 3180 list_for_each_entry(mc, &id_priv->mc_list, list) { 3181 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) { 3182 list_del(&mc->list); 3183 spin_unlock_irq(&id_priv->lock); 3184 3185 if (id->qp) 3186 ib_detach_mcast(id->qp, 3187 &mc->multicast.ib->rec.mgid, 3188 mc->multicast.ib->rec.mlid); 3189 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { 3190 switch (rdma_port_get_link_layer(id->device, id->port_num)) { 3191 case IB_LINK_LAYER_INFINIBAND: 3192 ib_sa_free_multicast(mc->multicast.ib); 3193 kfree(mc); 3194 break; 3195 case 
IB_LINK_LAYER_ETHERNET: 3196 kref_put(&mc->mcref, release_mc); 3197 break; 3198 default: 3199 break; 3200 } 3201 } 3202 return; 3203 } 3204 } 3205 spin_unlock_irq(&id_priv->lock); 3206} 3207EXPORT_SYMBOL(rdma_leave_multicast); 3208 3209static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) 3210{ 3211 struct rdma_dev_addr *dev_addr; 3212 struct cma_ndev_work *work; 3213 3214 dev_addr = &id_priv->id.route.addr.dev_addr; 3215 3216#ifdef __linux__ 3217 if ((dev_addr->bound_dev_if == ndev->ifindex) && 3218 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 3219 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", 3220 ndev->name, &id_priv->id); 3221#else 3222 if ((dev_addr->bound_dev_if == ndev->if_index) && 3223 memcmp(dev_addr->src_dev_addr, IF_LLADDR(ndev), ndev->if_addrlen)) { 3224 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", 3225 ndev->if_xname, &id_priv->id); 3226#endif 3227 work = kzalloc(sizeof *work, GFP_KERNEL); 3228 if (!work) 3229 return -ENOMEM; 3230 3231 INIT_WORK(&work->work, cma_ndev_work_handler); 3232 work->id = id_priv; 3233 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; 3234 atomic_inc(&id_priv->refcount); 3235 queue_work(cma_wq, &work->work); 3236 } 3237 3238 return 0; 3239} 3240 3241static int cma_netdev_callback(struct notifier_block *self, unsigned long event, 3242 void *ctx) 3243{ 3244 struct net_device *ndev = (struct net_device *)ctx; 3245 struct cma_device *cma_dev; 3246 struct rdma_id_private *id_priv; 3247 int ret = NOTIFY_DONE; 3248 3249#ifdef __linux__ 3250 if (dev_net(ndev) != &init_net) 3251 return NOTIFY_DONE; 3252 3253 if (event != NETDEV_BONDING_FAILOVER) 3254 return NOTIFY_DONE; 3255 3256 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING)) 3257 return NOTIFY_DONE; 3258#else 3259 if (event != NETDEV_DOWN && event != NETDEV_UNREGISTER) 3260 return NOTIFY_DONE; 3261#endif 3262 3263 mutex_lock(&lock); 3264 list_for_each_entry(cma_dev, &dev_list, list) 3265 list_for_each_entry(id_priv, &cma_dev->id_list, list) { 3266 ret = cma_netdev_change(ndev, id_priv); 3267 if (ret) 3268 goto out; 3269 } 3270 3271out: 3272 mutex_unlock(&lock); 3273 return ret; 3274} 3275 3276static struct notifier_block cma_nb = { 3277 .notifier_call = cma_netdev_callback 3278}; 3279 3280static void cma_add_one(struct ib_device *device) 3281{ 3282 struct cma_device *cma_dev; 3283 struct rdma_id_private *id_priv; 3284 3285 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); 3286 if (!cma_dev) 3287 return; 3288 3289 cma_dev->device = device; 3290 3291 init_completion(&cma_dev->comp); 3292 atomic_set(&cma_dev->refcount, 1); 3293 INIT_LIST_HEAD(&cma_dev->id_list); 3294 ib_set_client_data(device, &cma_client, cma_dev); 3295 3296 mutex_lock(&lock); 3297 list_add_tail(&cma_dev->list, &dev_list); 3298 list_for_each_entry(id_priv, &listen_any_list, list) 3299 cma_listen_on_dev(id_priv, cma_dev); 3300 mutex_unlock(&lock); 3301} 3302 3303static int cma_remove_id_dev(struct rdma_id_private *id_priv) 3304{ 3305 struct rdma_cm_event event; 3306 enum cma_state state; 3307 int ret = 0; 3308 3309 /* Record that we want to remove the device */ 3310 state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); 3311 if (state == CMA_DESTROYING) 3312 return 0; 3313 3314 cma_cancel_operation(id_priv, state); 3315 mutex_lock(&id_priv->handler_mutex); 3316 3317 /* Check for destruction from another callback. 
*/ 3318 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 3319 goto out; 3320 3321 memset(&event, 0, sizeof event); 3322 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 3323 ret = id_priv->id.event_handler(&id_priv->id, &event); 3324out: 3325 mutex_unlock(&id_priv->handler_mutex); 3326 return ret; 3327} 3328 3329static void cma_process_remove(struct cma_device *cma_dev) 3330{ 3331 struct rdma_id_private *id_priv; 3332 int ret; 3333 3334 mutex_lock(&lock); 3335 while (!list_empty(&cma_dev->id_list)) { 3336 id_priv = list_entry(cma_dev->id_list.next, 3337 struct rdma_id_private, list); 3338 3339 list_del(&id_priv->listen_list); 3340 list_del_init(&id_priv->list); 3341 atomic_inc(&id_priv->refcount); 3342 mutex_unlock(&lock); 3343 3344 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); 3345 cma_deref_id(id_priv); 3346 if (ret) 3347 rdma_destroy_id(&id_priv->id); 3348 3349 mutex_lock(&lock); 3350 } 3351 mutex_unlock(&lock); 3352 3353 cma_deref_dev(cma_dev); 3354 wait_for_completion(&cma_dev->comp); 3355} 3356 3357static void cma_remove_one(struct ib_device *device) 3358{ 3359 struct cma_device *cma_dev; 3360 3361 cma_dev = ib_get_client_data(device, &cma_client); 3362 if (!cma_dev) 3363 return; 3364 3365 mutex_lock(&lock); 3366 list_del(&cma_dev->list); 3367 mutex_unlock(&lock); 3368 3369 cma_process_remove(cma_dev); 3370 kfree(cma_dev); 3371} 3372 3373static int cma_init(void) 3374{ 3375 int ret; 3376#if defined(INET) 3377 int low, high, remaining; 3378 3379 get_random_bytes(&next_port, sizeof next_port); 3380 inet_get_local_port_range(&low, &high); 3381 remaining = (high - low) + 1; 3382 next_port = ((unsigned int) next_port % remaining) + low; 3383#endif 3384 3385 cma_wq = create_singlethread_workqueue("rdma_cm"); 3386 if (!cma_wq) 3387 return -ENOMEM; 3388 3389 ib_sa_register_client(&sa_client); 3390 rdma_addr_register_client(&addr_client); 3391 register_netdevice_notifier(&cma_nb); 3392 3393 ret = ib_register_client(&cma_client); 3394 if (ret) 3395 goto err; 3396 return 0; 3397 3398err: 3399 unregister_netdevice_notifier(&cma_nb); 3400 rdma_addr_unregister_client(&addr_client); 3401 ib_sa_unregister_client(&sa_client); 3402 destroy_workqueue(cma_wq); 3403 return ret; 3404} 3405 3406static void cma_cleanup(void) 3407{ 3408 ib_unregister_client(&cma_client); 3409 unregister_netdevice_notifier(&cma_nb); 3410 rdma_addr_unregister_client(&addr_client); 3411 ib_sa_unregister_client(&sa_client); 3412 destroy_workqueue(cma_wq); 3413 idr_destroy(&sdp_ps); 3414 idr_destroy(&tcp_ps); 3415 idr_destroy(&udp_ps); 3416 idr_destroy(&ipoib_ps); 3417} 3418 3419module_init(cma_init); 3420module_exit(cma_cleanup); 3421
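/*
 * Typical active-side use of this module by a kernel ULP, as a minimal
 * sketch: error handling is elided, the handler and context names are
 * illustrative, and rdma_create_id() is assumed to have the three-argument
 * signature exported by this tree.
 *
 *    static int ulp_handler(struct rdma_cm_id *id,
 *                           struct rdma_cm_event *event)
 *    {
 *        switch (event->event) {
 *        case RDMA_CM_EVENT_ADDR_RESOLVED:
 *            return rdma_resolve_route(id, 2000);
 *        case RDMA_CM_EVENT_ROUTE_RESOLVED:
 *            // create the QP, then:
 *            return rdma_connect(id, &param);
 *        case RDMA_CM_EVENT_ESTABLISHED:
 *            // connection is ready for I/O
 *            return 0;
 *        default:
 *            return 0;
 *        }
 *    }
 *
 *    id = rdma_create_id(ulp_handler, ctx, RDMA_PS_TCP);
 *    rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
 *
 * Returning nonzero from the handler causes the rdma_cm to destroy the id,
 * as the handler call sites above show.
 */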