cma.c revision 325611
1/* 2 * Copyright (c) 2005 Voltaire Inc. All rights reserved. 3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. 4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. 5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 6 * Copyright (c) 2016 Chelsio Communications. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU 10 * General Public License (GPL) Version 2, available from the file 11 * COPYING in the main directory of this source tree, or the 12 * OpenIB.org BSD license below: 13 * 14 * Redistribution and use in source and binary forms, with or 15 * without modification, are permitted provided that the following 16 * conditions are met: 17 * 18 * - Redistributions of source code must retain the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer. 21 * 22 * - Redistributions in binary form must reproduce the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer in the documentation and/or other materials 25 * provided with the distribution. 26 * 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 * SOFTWARE. 35 */ 36 37#define LINUXKPI_PARAM_PREFIX ibcore_ 38 39#include <linux/completion.h> 40#include <linux/in.h> 41#include <linux/in6.h> 42#include <linux/mutex.h> 43#include <linux/random.h> 44#include <linux/idr.h> 45#include <linux/inetdevice.h> 46 47#include <net/tcp.h> 48#include <net/ipv6.h> 49 50#include <rdma/rdma_cm.h> 51#include <rdma/rdma_cm_ib.h> 52#include <rdma/ib_cache.h> 53#include <rdma/ib_cm.h> 54#include <rdma/ib_sa.h> 55#include <rdma/iw_cm.h> 56 57MODULE_AUTHOR("Sean Hefty"); 58MODULE_DESCRIPTION("Generic RDMA CM Agent"); 59MODULE_LICENSE("Dual BSD/GPL"); 60 61static int tavor_quirk = 0; 62module_param_named(tavor_quirk, tavor_quirk, int, 0644); 63MODULE_PARM_DESC(tavor_quirk, "Tavor performance quirk: limit MTU to 1K if > 0"); 64 65int unify_tcp_port_space = 1; 66module_param(unify_tcp_port_space, int, 0644); 67MODULE_PARM_DESC(unify_tcp_port_space, "Unify the host TCP and RDMA port " 68 "space allocation (default=1)"); 69 70#define CMA_CM_RESPONSE_TIMEOUT 20 71#define CMA_MAX_CM_RETRIES 15 72#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 73#define IBOE_PACKET_LIFETIME 18 74 75static int cma_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 76module_param_named(cma_response_timeout, cma_response_timeout, int, 0644); 77MODULE_PARM_DESC(cma_response_timeout, "CMA_CM_RESPONSE_TIMEOUT default=20"); 78 79static int def_prec2sl = 3; 80module_param_named(def_prec2sl, def_prec2sl, int, 0644); 81MODULE_PARM_DESC(def_prec2sl, "Default value for SL priority with RoCE. 
Valid values 0 - 7"); 82 83static void cma_add_one(struct ib_device *device); 84static void cma_remove_one(struct ib_device *device); 85 86static struct ib_client cma_client = { 87 .name = "cma", 88 .add = cma_add_one, 89 .remove = cma_remove_one 90}; 91 92static struct ib_sa_client sa_client; 93static struct rdma_addr_client addr_client; 94static LIST_HEAD(dev_list); 95static LIST_HEAD(listen_any_list); 96static DEFINE_MUTEX(lock); 97static struct workqueue_struct *cma_wq; 98static DEFINE_IDR(sdp_ps); 99static DEFINE_IDR(tcp_ps); 100static DEFINE_IDR(udp_ps); 101static DEFINE_IDR(ipoib_ps); 102#if defined(INET) 103static int next_port; 104#endif 105 106struct cma_device { 107 struct list_head list; 108 struct ib_device *device; 109 struct completion comp; 110 atomic_t refcount; 111 struct list_head id_list; 112}; 113 114enum cma_state { 115 CMA_IDLE, 116 CMA_ADDR_QUERY, 117 CMA_ADDR_RESOLVED, 118 CMA_ROUTE_QUERY, 119 CMA_ROUTE_RESOLVED, 120 CMA_CONNECT, 121 CMA_DISCONNECT, 122 CMA_ADDR_BOUND, 123 CMA_LISTEN, 124 CMA_DEVICE_REMOVAL, 125 CMA_DESTROYING 126}; 127 128struct rdma_bind_list { 129 struct idr *ps; 130 struct hlist_head owners; 131 unsigned short port; 132}; 133 134/* 135 * Device removal can occur at anytime, so we need extra handling to 136 * serialize notifying the user of device removal with other callbacks. 137 * We do this by disabling removal notification while a callback is in process, 138 * and reporting it after the callback completes. 139 */ 140struct rdma_id_private { 141 struct rdma_cm_id id; 142 143 struct rdma_bind_list *bind_list; 144 struct socket *sock; 145 struct hlist_node node; 146 struct list_head list; /* listen_any_list or cma_device.list */ 147 struct list_head listen_list; /* per device listens */ 148 struct cma_device *cma_dev; 149 struct list_head mc_list; 150 151 int internal_id; 152 enum cma_state state; 153 spinlock_t lock; 154 struct mutex qp_mutex; 155 156 struct completion comp; 157 atomic_t refcount; 158 struct mutex handler_mutex; 159 160 int backlog; 161 int timeout_ms; 162 struct ib_sa_query *query; 163 int query_id; 164 union { 165 struct ib_cm_id *ib; 166 struct iw_cm_id *iw; 167 } cm_id; 168 169 u32 seq_num; 170 u32 qkey; 171 u32 qp_num; 172 u8 srq; 173 u8 tos; 174 int unify_ps_tcp; 175}; 176 177struct cma_multicast { 178 struct rdma_id_private *id_priv; 179 union { 180 struct ib_sa_multicast *ib; 181 } multicast; 182 struct list_head list; 183 void *context; 184 struct sockaddr_storage addr; 185 struct kref mcref; 186}; 187 188struct cma_work { 189 struct work_struct work; 190 struct rdma_id_private *id; 191 enum cma_state old_state; 192 enum cma_state new_state; 193 struct rdma_cm_event event; 194}; 195 196struct cma_ndev_work { 197 struct work_struct work; 198 struct rdma_id_private *id; 199 struct rdma_cm_event event; 200}; 201 202struct iboe_mcast_work { 203 struct work_struct work; 204 struct rdma_id_private *id; 205 struct cma_multicast *mc; 206}; 207 208union cma_ip_addr { 209 struct in6_addr ip6; 210 struct { 211 __be32 pad[3]; 212 __be32 addr; 213 } ip4; 214}; 215 216struct cma_hdr { 217 u8 cma_version; 218 u8 ip_version; /* IP version: 7:4 */ 219 __be16 port; 220 union cma_ip_addr src_addr; 221 union cma_ip_addr dst_addr; 222}; 223 224struct sdp_hh { 225 u8 bsdh[16]; 226 u8 sdp_version; /* Major version: 7:4 */ 227 u8 ip_version; /* IP version: 7:4 */ 228 u8 sdp_specific1[10]; 229 __be16 port; 230 __be16 sdp_specific2; 231 union cma_ip_addr src_addr; 232 union cma_ip_addr dst_addr; 233}; 234 235struct sdp_hah { 236 u8 bsdh[16]; 
237 u8 sdp_version; 238}; 239 240#define CMA_VERSION 0x00 241#define SDP_MAJ_VERSION 0x2 242 243static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) 244{ 245 unsigned long flags; 246 int ret; 247 248 spin_lock_irqsave(&id_priv->lock, flags); 249 ret = (id_priv->state == comp); 250 spin_unlock_irqrestore(&id_priv->lock, flags); 251 return ret; 252} 253 254static int cma_comp_exch(struct rdma_id_private *id_priv, 255 enum cma_state comp, enum cma_state exch) 256{ 257 unsigned long flags; 258 int ret; 259 260 spin_lock_irqsave(&id_priv->lock, flags); 261 if ((ret = (id_priv->state == comp))) 262 id_priv->state = exch; 263 spin_unlock_irqrestore(&id_priv->lock, flags); 264 return ret; 265} 266 267static enum cma_state cma_exch(struct rdma_id_private *id_priv, 268 enum cma_state exch) 269{ 270 unsigned long flags; 271 enum cma_state old; 272 273 spin_lock_irqsave(&id_priv->lock, flags); 274 old = id_priv->state; 275 id_priv->state = exch; 276 spin_unlock_irqrestore(&id_priv->lock, flags); 277 return old; 278} 279 280static inline u8 cma_get_ip_ver(struct cma_hdr *hdr) 281{ 282 return hdr->ip_version >> 4; 283} 284 285static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) 286{ 287 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); 288} 289 290static inline u8 sdp_get_majv(u8 sdp_version) 291{ 292 return sdp_version >> 4; 293} 294 295static inline u8 sdp_get_ip_ver(struct sdp_hh *hh) 296{ 297 return hh->ip_version >> 4; 298} 299 300static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver) 301{ 302 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); 303} 304 305static inline int cma_is_ud_ps(enum rdma_port_space ps) 306{ 307 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB); 308} 309 310static void cma_attach_to_dev(struct rdma_id_private *id_priv, 311 struct cma_device *cma_dev) 312{ 313 atomic_inc(&cma_dev->refcount); 314 id_priv->cma_dev = cma_dev; 315 id_priv->id.device = cma_dev->device; 316 id_priv->id.route.addr.dev_addr.transport = 317 rdma_node_get_transport(cma_dev->device->node_type); 318 list_add_tail(&id_priv->list, &cma_dev->id_list); 319} 320 321static inline void cma_deref_dev(struct cma_device *cma_dev) 322{ 323 if (atomic_dec_and_test(&cma_dev->refcount)) 324 complete(&cma_dev->comp); 325} 326 327static inline void release_mc(struct kref *kref) 328{ 329 struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); 330 331 kfree(mc->multicast.ib); 332 kfree(mc); 333} 334 335static void cma_detach_from_dev(struct rdma_id_private *id_priv) 336{ 337 list_del(&id_priv->list); 338 cma_deref_dev(id_priv->cma_dev); 339 id_priv->cma_dev = NULL; 340} 341 342static int cma_set_qkey(struct rdma_id_private *id_priv) 343{ 344 struct ib_sa_mcmember_rec rec; 345 int ret = 0; 346 347 if (id_priv->qkey) 348 return 0; 349 350 switch (id_priv->id.ps) { 351 case RDMA_PS_UDP: 352 id_priv->qkey = RDMA_UDP_QKEY; 353 break; 354 case RDMA_PS_IPOIB: 355 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); 356 ret = ib_sa_get_mcmember_rec(id_priv->id.device, 357 id_priv->id.port_num, &rec.mgid, 358 &rec); 359 if (!ret) 360 id_priv->qkey = be32_to_cpu(rec.qkey); 361 break; 362 default: 363 break; 364 } 365 return ret; 366} 367 368static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num) 369{ 370 int i; 371 int err; 372 struct ib_port_attr props; 373 union ib_gid tmp; 374 375 err = ib_query_port(device, port_num, &props); 376 if (err) 377 return 1; 378 379 for (i = 0; i < props.gid_tbl_len; ++i) { 380 err = 
ib_query_gid(device, port_num, i, &tmp); 381 if (err) 382 return 1; 383 if (!memcmp(&tmp, gid, sizeof tmp)) 384 return 0; 385 } 386 387 return -EAGAIN; 388} 389 390int 391rdma_find_cmid_laddr(struct sockaddr_in *local_addr, unsigned short dev_type, 392 void **cm_id) 393{ 394 int ret; 395 u8 port; 396 int found_dev = 0, found_cmid = 0; 397 struct rdma_id_private *id_priv; 398 struct rdma_id_private *dev_id_priv; 399 struct cma_device *cma_dev; 400 struct rdma_dev_addr dev_addr; 401 union ib_gid gid; 402 enum rdma_link_layer dev_ll = dev_type == ARPHRD_INFINIBAND ? 403 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; 404 405 memset(&dev_addr, 0, sizeof(dev_addr)); 406 407 ret = rdma_translate_ip((struct sockaddr *)local_addr, 408 &dev_addr); 409 if (ret) 410 goto err; 411 412 /* find rdma device based on MAC address/gid */ 413 mutex_lock(&lock); 414 415 memcpy(&gid, dev_addr.src_dev_addr + 416 rdma_addr_gid_offset(&dev_addr), sizeof(gid)); 417 418 list_for_each_entry(cma_dev, &dev_list, list) 419 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) 420 if ((rdma_port_get_link_layer(cma_dev->device, port) == 421 dev_ll) && 422 (rdma_node_get_transport(cma_dev->device->node_type) == 423 RDMA_TRANSPORT_IWARP)) { 424 ret = find_gid_port(cma_dev->device, 425 &gid, port); 426 if (!ret) { 427 found_dev = 1; 428 goto out; 429 } else if (ret == 1) { 430 mutex_unlock(&lock); 431 goto err; 432 } 433 } 434out: 435 mutex_unlock(&lock); 436 437 if (!found_dev) 438 goto err; 439 440 /* Traverse through the list of listening cm_id's to find the 441 * desired cm_id based on rdma device & port number. 442 */ 443 list_for_each_entry(id_priv, &listen_any_list, list) 444 list_for_each_entry(dev_id_priv, &id_priv->listen_list, 445 listen_list) 446 if (dev_id_priv->cma_dev == cma_dev) 447 if (dev_id_priv->cm_id.iw->local_addr.sin_port 448 == local_addr->sin_port) { 449 *cm_id = (void *)dev_id_priv->cm_id.iw; 450 found_cmid = 1; 451 } 452 return found_cmid ? 
0 : -ENODEV; 453 454err: 455 return -ENODEV; 456} 457EXPORT_SYMBOL(rdma_find_cmid_laddr); 458 459static int cma_acquire_dev(struct rdma_id_private *id_priv) 460{ 461 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 462 struct cma_device *cma_dev; 463 union ib_gid gid; 464 int ret = -ENODEV; 465 466 if (dev_addr->dev_type != ARPHRD_INFINIBAND) { 467 iboe_addr_get_sgid(dev_addr, &gid); 468 list_for_each_entry(cma_dev, &dev_list, list) { 469 ret = ib_find_cached_gid(cma_dev->device, &gid, 470 &id_priv->id.port_num, NULL); 471 if (!ret) 472 goto out; 473 } 474 } 475 476 memcpy(&gid, dev_addr->src_dev_addr + 477 rdma_addr_gid_offset(dev_addr), sizeof gid); 478 list_for_each_entry(cma_dev, &dev_list, list) { 479 ret = ib_find_cached_gid(cma_dev->device, &gid, 480 &id_priv->id.port_num, NULL); 481 if (!ret) 482 break; 483 } 484 485out: 486 if (!ret) 487 cma_attach_to_dev(id_priv, cma_dev); 488 489 return ret; 490} 491 492static void cma_deref_id(struct rdma_id_private *id_priv) 493{ 494 if (atomic_dec_and_test(&id_priv->refcount)) 495 complete(&id_priv->comp); 496} 497 498static int cma_disable_callback(struct rdma_id_private *id_priv, 499 enum cma_state state) 500{ 501 mutex_lock(&id_priv->handler_mutex); 502 if (id_priv->state != state) { 503 mutex_unlock(&id_priv->handler_mutex); 504 return -EINVAL; 505 } 506 return 0; 507} 508 509static int cma_has_cm_dev(struct rdma_id_private *id_priv) 510{ 511 return (id_priv->id.device && id_priv->cm_id.ib); 512} 513 514struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 515 void *context, enum rdma_port_space ps) 516{ 517 struct rdma_id_private *id_priv; 518 519 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); 520 if (!id_priv) 521 return ERR_PTR(-ENOMEM); 522 523 id_priv->state = CMA_IDLE; 524 id_priv->id.context = context; 525 id_priv->id.event_handler = event_handler; 526 id_priv->id.ps = ps; 527 spin_lock_init(&id_priv->lock); 528 mutex_init(&id_priv->qp_mutex); 529 init_completion(&id_priv->comp); 530 atomic_set(&id_priv->refcount, 1); 531 mutex_init(&id_priv->handler_mutex); 532 INIT_LIST_HEAD(&id_priv->listen_list); 533 INIT_LIST_HEAD(&id_priv->mc_list); 534 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); 535 536 return &id_priv->id; 537} 538EXPORT_SYMBOL(rdma_create_id); 539 540static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 541{ 542 struct ib_qp_attr qp_attr; 543 int qp_attr_mask, ret; 544 545 qp_attr.qp_state = IB_QPS_INIT; 546 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 547 if (ret) 548 return ret; 549 550 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); 551 if (ret) 552 return ret; 553 554 qp_attr.qp_state = IB_QPS_RTR; 555 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); 556 if (ret) 557 return ret; 558 559 qp_attr.qp_state = IB_QPS_RTS; 560 qp_attr.sq_psn = 0; 561 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); 562 563 return ret; 564} 565 566static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 567{ 568 struct ib_qp_attr qp_attr; 569 int qp_attr_mask, ret; 570 571 qp_attr.qp_state = IB_QPS_INIT; 572 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 573 if (ret) 574 return ret; 575 576 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); 577} 578 579int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 580 struct ib_qp_init_attr *qp_init_attr) 581{ 582 struct rdma_id_private *id_priv; 583 struct ib_qp *qp; 584 int ret; 585 586 id_priv = container_of(id, struct rdma_id_private, id); 587 if 
(id->device != pd->device) 588 return -EINVAL; 589 590 qp = ib_create_qp(pd, qp_init_attr); 591 if (IS_ERR(qp)) 592 return PTR_ERR(qp); 593 594 if (cma_is_ud_ps(id_priv->id.ps)) 595 ret = cma_init_ud_qp(id_priv, qp); 596 else 597 ret = cma_init_conn_qp(id_priv, qp); 598 if (ret) 599 goto err; 600 601 id->qp = qp; 602 id_priv->qp_num = qp->qp_num; 603 id_priv->srq = (qp->srq != NULL); 604 return 0; 605err: 606 ib_destroy_qp(qp); 607 return ret; 608} 609EXPORT_SYMBOL(rdma_create_qp); 610 611void rdma_destroy_qp(struct rdma_cm_id *id) 612{ 613 struct rdma_id_private *id_priv; 614 615 id_priv = container_of(id, struct rdma_id_private, id); 616 mutex_lock(&id_priv->qp_mutex); 617 ib_destroy_qp(id_priv->id.qp); 618 id_priv->id.qp = NULL; 619 mutex_unlock(&id_priv->qp_mutex); 620} 621EXPORT_SYMBOL(rdma_destroy_qp); 622 623static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, 624 struct rdma_conn_param *conn_param) 625{ 626 struct ib_qp_attr qp_attr; 627 int qp_attr_mask, ret; 628 629 mutex_lock(&id_priv->qp_mutex); 630 if (!id_priv->id.qp) { 631 ret = 0; 632 goto out; 633 } 634 635 /* Need to update QP attributes from default values. */ 636 qp_attr.qp_state = IB_QPS_INIT; 637 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 638 if (ret) 639 goto out; 640 641 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 642 if (ret) 643 goto out; 644 645 qp_attr.qp_state = IB_QPS_RTR; 646 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 647 if (ret) 648 goto out; 649 650 if (conn_param) 651 qp_attr.max_dest_rd_atomic = conn_param->responder_resources; 652 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 653out: 654 mutex_unlock(&id_priv->qp_mutex); 655 return ret; 656} 657 658static int cma_modify_qp_rts(struct rdma_id_private *id_priv, 659 struct rdma_conn_param *conn_param) 660{ 661 struct ib_qp_attr qp_attr; 662 int qp_attr_mask, ret; 663 664 mutex_lock(&id_priv->qp_mutex); 665 if (!id_priv->id.qp) { 666 ret = 0; 667 goto out; 668 } 669 670 qp_attr.qp_state = IB_QPS_RTS; 671 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); 672 if (ret) 673 goto out; 674 675 if (conn_param) 676 qp_attr.max_rd_atomic = conn_param->initiator_depth; 677 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 678out: 679 mutex_unlock(&id_priv->qp_mutex); 680 return ret; 681} 682 683static int cma_modify_qp_err(struct rdma_id_private *id_priv) 684{ 685 struct ib_qp_attr qp_attr; 686 int ret; 687 688 mutex_lock(&id_priv->qp_mutex); 689 if (!id_priv->id.qp) { 690 ret = 0; 691 goto out; 692 } 693 694 qp_attr.qp_state = IB_QPS_ERR; 695 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); 696out: 697 mutex_unlock(&id_priv->qp_mutex); 698 return ret; 699} 700 701static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, 702 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 703{ 704 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 705 int ret; 706 u16 pkey; 707 708 if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) == 709 IB_LINK_LAYER_INFINIBAND) 710 pkey = ib_addr_get_pkey(dev_addr); 711 else 712 pkey = 0xffff; 713 714 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, 715 pkey, &qp_attr->pkey_index); 716 if (ret) 717 return ret; 718 719 qp_attr->port_num = id_priv->id.port_num; 720 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 721 722 if (cma_is_ud_ps(id_priv->id.ps)) { 723 ret = cma_set_qkey(id_priv); 724 if (ret) 725 return ret; 726 727 qp_attr->qkey = id_priv->qkey; 728 
*qp_attr_mask |= IB_QP_QKEY; 729 } else { 730 qp_attr->qp_access_flags = 0; 731 *qp_attr_mask |= IB_QP_ACCESS_FLAGS; 732 } 733 return 0; 734} 735 736int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 737 int *qp_attr_mask) 738{ 739 struct rdma_id_private *id_priv; 740 int ret = 0; 741 742 id_priv = container_of(id, struct rdma_id_private, id); 743 if (rdma_cap_ib_cm(id->device, id->port_num)) { 744 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) 745 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 746 else 747 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 748 qp_attr_mask); 749 750 if (qp_attr->qp_state == IB_QPS_RTR) 751 qp_attr->rq_psn = id_priv->seq_num; 752 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 753 if (!id_priv->cm_id.iw) { 754 qp_attr->qp_access_flags = 0; 755 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; 756 } else 757 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, 758 qp_attr_mask); 759 } else 760 ret = -ENOSYS; 761 762 return ret; 763} 764EXPORT_SYMBOL(rdma_init_qp_attr); 765 766static inline int cma_zero_addr(struct sockaddr *addr) 767{ 768 struct in6_addr *ip6; 769 770 if (addr->sa_family == AF_INET) 771 return ipv4_is_zeronet( 772 ((struct sockaddr_in *)addr)->sin_addr.s_addr); 773 else { 774 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr; 775 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] | 776 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0; 777 } 778} 779 780static inline int cma_loopback_addr(struct sockaddr *addr) 781{ 782 if (addr->sa_family == AF_INET) 783 return ipv4_is_loopback( 784 ((struct sockaddr_in *) addr)->sin_addr.s_addr); 785 else 786 return ipv6_addr_loopback( 787 &((struct sockaddr_in6 *) addr)->sin6_addr); 788} 789 790static inline int cma_any_addr(struct sockaddr *addr) 791{ 792 return cma_zero_addr(addr) || cma_loopback_addr(addr); 793} 794int 795rdma_cma_any_addr(struct sockaddr *addr) 796{ 797 return cma_any_addr(addr); 798} 799EXPORT_SYMBOL(rdma_cma_any_addr); 800 801static inline __be16 cma_port(struct sockaddr *addr) 802{ 803 if (addr->sa_family == AF_INET) 804 return ((struct sockaddr_in *) addr)->sin_port; 805 else 806 return ((struct sockaddr_in6 *) addr)->sin6_port; 807} 808 809static inline int cma_any_port(struct sockaddr *addr) 810{ 811 return !cma_port(addr); 812} 813 814static int cma_get_net_info(void *hdr, enum rdma_port_space ps, 815 u8 *ip_ver, __be16 *port, 816 union cma_ip_addr **src, union cma_ip_addr **dst) 817{ 818 switch (ps) { 819 case RDMA_PS_SDP: 820 if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) != 821 SDP_MAJ_VERSION) 822 return -EINVAL; 823 824 *ip_ver = sdp_get_ip_ver(hdr); 825 *port = ((struct sdp_hh *) hdr)->port; 826 *src = &((struct sdp_hh *) hdr)->src_addr; 827 *dst = &((struct sdp_hh *) hdr)->dst_addr; 828 break; 829 default: 830 if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION) 831 return -EINVAL; 832 833 *ip_ver = cma_get_ip_ver(hdr); 834 *port = ((struct cma_hdr *) hdr)->port; 835 *src = &((struct cma_hdr *) hdr)->src_addr; 836 *dst = &((struct cma_hdr *) hdr)->dst_addr; 837 break; 838 } 839 840 if (*ip_ver != 4 && *ip_ver != 6) 841 return -EINVAL; 842 return 0; 843} 844 845static void cma_save_net_info(struct rdma_addr *addr, 846 struct rdma_addr *listen_addr, 847 u8 ip_ver, __be16 port, 848 union cma_ip_addr *src, union cma_ip_addr *dst) 849{ 850 struct sockaddr_in *listen4, *ip4; 851 struct sockaddr_in6 *listen6, *ip6; 852 853 switch (ip_ver) { 854 case 4: 855 listen4 = (struct sockaddr_in *) &listen_addr->src_addr; 856 ip4 = 
(struct sockaddr_in *) &addr->src_addr; 857 ip4->sin_family = listen4->sin_family; 858 ip4->sin_addr.s_addr = dst->ip4.addr; 859 ip4->sin_port = listen4->sin_port; 860 861 ip4 = (struct sockaddr_in *) &addr->dst_addr; 862 ip4->sin_family = listen4->sin_family; 863 ip4->sin_addr.s_addr = src->ip4.addr; 864 ip4->sin_port = port; 865 break; 866 case 6: 867 listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr; 868 ip6 = (struct sockaddr_in6 *) &addr->src_addr; 869 ip6->sin6_family = listen6->sin6_family; 870 ip6->sin6_addr = dst->ip6; 871 ip6->sin6_port = listen6->sin6_port; 872 873 ip6 = (struct sockaddr_in6 *) &addr->dst_addr; 874 ip6->sin6_family = listen6->sin6_family; 875 ip6->sin6_addr = src->ip6; 876 ip6->sin6_port = port; 877 break; 878 default: 879 break; 880 } 881} 882 883static inline int cma_user_data_offset(enum rdma_port_space ps) 884{ 885 switch (ps) { 886 case RDMA_PS_SDP: 887 return 0; 888 default: 889 return sizeof(struct cma_hdr); 890 } 891} 892 893static void cma_cancel_route(struct rdma_id_private *id_priv) 894{ 895 switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) { 896 case IB_LINK_LAYER_INFINIBAND: 897 if (id_priv->query) 898 ib_sa_cancel_query(id_priv->query_id, id_priv->query); 899 break; 900 default: 901 break; 902 } 903} 904 905static void cma_cancel_listens(struct rdma_id_private *id_priv) 906{ 907 struct rdma_id_private *dev_id_priv; 908 909 /* 910 * Remove from listen_any_list to prevent added devices from spawning 911 * additional listen requests. 912 */ 913 mutex_lock(&lock); 914 list_del(&id_priv->list); 915 916 while (!list_empty(&id_priv->listen_list)) { 917 dev_id_priv = list_entry(id_priv->listen_list.next, 918 struct rdma_id_private, listen_list); 919 /* sync with device removal to avoid duplicate destruction */ 920 list_del_init(&dev_id_priv->list); 921 list_del(&dev_id_priv->listen_list); 922 mutex_unlock(&lock); 923 924 rdma_destroy_id(&dev_id_priv->id); 925 mutex_lock(&lock); 926 } 927 mutex_unlock(&lock); 928} 929 930static void cma_cancel_operation(struct rdma_id_private *id_priv, 931 enum cma_state state) 932{ 933 switch (state) { 934 case CMA_ADDR_QUERY: 935 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 936 break; 937 case CMA_ROUTE_QUERY: 938 cma_cancel_route(id_priv); 939 break; 940 case CMA_LISTEN: 941 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) 942 && !id_priv->cma_dev) 943 cma_cancel_listens(id_priv); 944 break; 945 default: 946 break; 947 } 948} 949 950static void cma_release_port(struct rdma_id_private *id_priv) 951{ 952 struct rdma_bind_list *bind_list = id_priv->bind_list; 953 954 if (!bind_list) 955 return; 956 957 mutex_lock(&lock); 958 hlist_del(&id_priv->node); 959 if (hlist_empty(&bind_list->owners)) { 960 idr_remove(bind_list->ps, bind_list->port); 961 kfree(bind_list); 962 } 963 mutex_unlock(&lock); 964} 965 966static void cma_leave_mc_groups(struct rdma_id_private *id_priv) 967{ 968 struct cma_multicast *mc; 969 970 while (!list_empty(&id_priv->mc_list)) { 971 mc = container_of(id_priv->mc_list.next, 972 struct cma_multicast, list); 973 list_del(&mc->list); 974 switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) { 975 case IB_LINK_LAYER_INFINIBAND: 976 ib_sa_free_multicast(mc->multicast.ib); 977 kfree(mc); 978 break; 979 case IB_LINK_LAYER_ETHERNET: 980 kref_put(&mc->mcref, release_mc); 981 break; 982 default: 983 break; 984 } 985 } 986} 987 988void rdma_destroy_id(struct rdma_cm_id *id) 989{ 990 struct rdma_id_private *id_priv; 991 enum 
cma_state state; 992 993 id_priv = container_of(id, struct rdma_id_private, id); 994 state = cma_exch(id_priv, CMA_DESTROYING); 995 cma_cancel_operation(id_priv, state); 996 997 mutex_lock(&lock); 998 if (id_priv->cma_dev) { 999 mutex_unlock(&lock); 1000 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { 1001 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) 1002 ib_destroy_cm_id(id_priv->cm_id.ib); 1003 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { 1004 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) 1005 iw_destroy_cm_id(id_priv->cm_id.iw); 1006 } 1007 cma_leave_mc_groups(id_priv); 1008 mutex_lock(&lock); 1009 cma_detach_from_dev(id_priv); 1010 } 1011 mutex_unlock(&lock); 1012 1013 cma_release_port(id_priv); 1014 cma_deref_id(id_priv); 1015 wait_for_completion(&id_priv->comp); 1016 1017 if (id_priv->internal_id) 1018 cma_deref_id(id_priv->id.context); 1019 1020 if (id_priv->sock != NULL && !id_priv->internal_id && 1021 !id_priv->unify_ps_tcp) 1022 sock_release(id_priv->sock); 1023 1024 kfree(id_priv->id.route.path_rec); 1025 kfree(id_priv); 1026} 1027EXPORT_SYMBOL(rdma_destroy_id); 1028 1029static int cma_rep_recv(struct rdma_id_private *id_priv) 1030{ 1031 int ret; 1032 1033 ret = cma_modify_qp_rtr(id_priv, NULL); 1034 if (ret) 1035 goto reject; 1036 1037 ret = cma_modify_qp_rts(id_priv, NULL); 1038 if (ret) 1039 goto reject; 1040 1041 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 1042 if (ret) 1043 goto reject; 1044 1045 return 0; 1046reject: 1047 cma_modify_qp_err(id_priv); 1048 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 1049 NULL, 0, NULL, 0); 1050 return ret; 1051} 1052 1053static int cma_verify_rep(struct rdma_id_private *id_priv, void *data) 1054{ 1055 if (id_priv->id.ps == RDMA_PS_SDP && 1056 sdp_get_majv(((struct sdp_hah *) data)->sdp_version) != 1057 SDP_MAJ_VERSION) 1058 return -EINVAL; 1059 1060 return 0; 1061} 1062 1063static void cma_set_rep_event_data(struct rdma_cm_event *event, 1064 struct ib_cm_rep_event_param *rep_data, 1065 void *private_data) 1066{ 1067 event->param.conn.private_data = private_data; 1068 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 1069 event->param.conn.responder_resources = rep_data->responder_resources; 1070 event->param.conn.initiator_depth = rep_data->initiator_depth; 1071 event->param.conn.flow_control = rep_data->flow_control; 1072 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 1073 event->param.conn.srq = rep_data->srq; 1074 event->param.conn.qp_num = rep_data->remote_qpn; 1075} 1076 1077static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1078{ 1079 struct rdma_id_private *id_priv = cm_id->context; 1080 struct rdma_cm_event event; 1081 int ret = 0; 1082 1083 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1084 cma_disable_callback(id_priv, CMA_CONNECT)) || 1085 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1086 cma_disable_callback(id_priv, CMA_DISCONNECT))) 1087 return 0; 1088 1089 memset(&event, 0, sizeof event); 1090 switch (ib_event->event) { 1091 case IB_CM_REQ_ERROR: 1092 case IB_CM_REP_ERROR: 1093 event.event = RDMA_CM_EVENT_UNREACHABLE; 1094 event.status = -ETIMEDOUT; 1095 break; 1096 case IB_CM_REP_RECEIVED: 1097 event.status = cma_verify_rep(id_priv, ib_event->private_data); 1098 if (event.status) 1099 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 1100 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) { 1101 event.status = cma_rep_recv(id_priv); 1102 event.event = event.status ? 
RDMA_CM_EVENT_CONNECT_ERROR : 1103 RDMA_CM_EVENT_ESTABLISHED; 1104 } else 1105 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 1106 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 1107 ib_event->private_data); 1108 break; 1109 case IB_CM_RTU_RECEIVED: 1110 case IB_CM_USER_ESTABLISHED: 1111 event.event = RDMA_CM_EVENT_ESTABLISHED; 1112 break; 1113 case IB_CM_DREQ_ERROR: 1114 event.status = -ETIMEDOUT; /* fall through */ 1115 case IB_CM_DREQ_RECEIVED: 1116 case IB_CM_DREP_RECEIVED: 1117 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) 1118 goto out; 1119 event.event = RDMA_CM_EVENT_DISCONNECTED; 1120 break; 1121 case IB_CM_TIMEWAIT_EXIT: 1122 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 1123 break; 1124 case IB_CM_MRA_RECEIVED: 1125 /* ignore event */ 1126 goto out; 1127 case IB_CM_REJ_RECEIVED: 1128 cma_modify_qp_err(id_priv); 1129 event.status = ib_event->param.rej_rcvd.reason; 1130 event.event = RDMA_CM_EVENT_REJECTED; 1131 event.param.conn.private_data = ib_event->private_data; 1132 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 1133 break; 1134 default: 1135 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", 1136 ib_event->event); 1137 goto out; 1138 } 1139 1140 ret = id_priv->id.event_handler(&id_priv->id, &event); 1141 if (ret) { 1142 /* Destroy the CM ID by returning a non-zero value. */ 1143 id_priv->cm_id.ib = NULL; 1144 cma_exch(id_priv, CMA_DESTROYING); 1145 mutex_unlock(&id_priv->handler_mutex); 1146 rdma_destroy_id(&id_priv->id); 1147 return ret; 1148 } 1149out: 1150 mutex_unlock(&id_priv->handler_mutex); 1151 return ret; 1152} 1153 1154static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, 1155 struct ib_cm_event *ib_event) 1156{ 1157 struct rdma_id_private *id_priv; 1158 struct rdma_cm_id *id; 1159 struct rdma_route *rt; 1160 union cma_ip_addr *src, *dst; 1161 __be16 port; 1162 u8 ip_ver; 1163 int ret; 1164 1165 if (cma_get_net_info(ib_event->private_data, listen_id->ps, 1166 &ip_ver, &port, &src, &dst)) 1167 goto err; 1168 1169 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1170 listen_id->ps); 1171 if (IS_ERR(id)) 1172 goto err; 1173 1174 cma_save_net_info(&id->route.addr, &listen_id->route.addr, 1175 ip_ver, port, src, dst); 1176 1177 rt = &id->route; 1178 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 
2 : 1; 1179 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, 1180 GFP_KERNEL); 1181 if (!rt->path_rec) 1182 goto destroy_id; 1183 1184 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path; 1185 if (rt->num_paths == 2) 1186 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 1187 1188 if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) { 1189 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 1190 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 1191 ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey); 1192 } else { 1193 ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr, 1194 &rt->addr.dev_addr); 1195 if (ret) 1196 goto destroy_id; 1197 } 1198 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1199 1200 id_priv = container_of(id, struct rdma_id_private, id); 1201 id_priv->state = CMA_CONNECT; 1202 return id_priv; 1203 1204destroy_id: 1205 rdma_destroy_id(id); 1206err: 1207 return NULL; 1208} 1209 1210static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, 1211 struct ib_cm_event *ib_event) 1212{ 1213 struct rdma_id_private *id_priv; 1214 struct rdma_cm_id *id; 1215 union cma_ip_addr *src, *dst; 1216 __be16 port; 1217 u8 ip_ver; 1218 int ret; 1219 1220 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1221 listen_id->ps); 1222 if (IS_ERR(id)) 1223 return NULL; 1224 1225 1226 if (cma_get_net_info(ib_event->private_data, listen_id->ps, 1227 &ip_ver, &port, &src, &dst)) 1228 goto err; 1229 1230 cma_save_net_info(&id->route.addr, &listen_id->route.addr, 1231 ip_ver, port, src, dst); 1232 1233 if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) { 1234 ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr, 1235 &id->route.addr.dev_addr); 1236 if (ret) 1237 goto err; 1238 } 1239 1240 id_priv = container_of(id, struct rdma_id_private, id); 1241 id_priv->state = CMA_CONNECT; 1242 return id_priv; 1243err: 1244 rdma_destroy_id(id); 1245 return NULL; 1246} 1247 1248static void cma_set_req_event_data(struct rdma_cm_event *event, 1249 struct ib_cm_req_event_param *req_data, 1250 void *private_data, int offset) 1251{ 1252 event->param.conn.private_data = private_data + offset; 1253 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 1254 event->param.conn.responder_resources = req_data->responder_resources; 1255 event->param.conn.initiator_depth = req_data->initiator_depth; 1256 event->param.conn.flow_control = req_data->flow_control; 1257 event->param.conn.retry_count = req_data->retry_count; 1258 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 1259 event->param.conn.srq = req_data->srq; 1260 event->param.conn.qp_num = req_data->remote_qpn; 1261} 1262 1263static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1264{ 1265 struct rdma_id_private *listen_id, *conn_id; 1266 struct rdma_cm_event event; 1267 int offset, ret; 1268 1269 listen_id = cm_id->context; 1270 if (cma_disable_callback(listen_id, CMA_LISTEN)) 1271 return -ECONNABORTED; 1272 1273 memset(&event, 0, sizeof event); 1274 offset = cma_user_data_offset(listen_id->id.ps); 1275 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1276 if (cma_is_ud_ps(listen_id->id.ps)) { 1277 conn_id = cma_new_udp_id(&listen_id->id, ib_event); 1278 event.param.ud.private_data = ib_event->private_data + offset; 1279 event.param.ud.private_data_len = 1280 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 1281 } else { 1282 conn_id = cma_new_conn_id(&listen_id->id, ib_event); 1283 
cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 1284 ib_event->private_data, offset); 1285 } 1286 if (!conn_id) { 1287 ret = -ENOMEM; 1288 goto out; 1289 } 1290 1291 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1292 mutex_lock(&lock); 1293 ret = cma_acquire_dev(conn_id); 1294 mutex_unlock(&lock); 1295 if (ret) 1296 goto release_conn_id; 1297 1298 conn_id->cm_id.ib = cm_id; 1299 cm_id->context = conn_id; 1300 cm_id->cm_handler = cma_ib_handler; 1301 1302 ret = conn_id->id.event_handler(&conn_id->id, &event); 1303 if (!ret) { 1304 /* 1305 * Acquire mutex to prevent user executing rdma_destroy_id() 1306 * while we're accessing the cm_id. 1307 */ 1308 mutex_lock(&lock); 1309 if (cma_comp(conn_id, CMA_CONNECT) && 1310 !cma_is_ud_ps(conn_id->id.ps)) 1311 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1312 mutex_unlock(&lock); 1313 mutex_unlock(&conn_id->handler_mutex); 1314 goto out; 1315 } 1316 1317 /* Destroy the CM ID by returning a non-zero value. */ 1318 conn_id->cm_id.ib = NULL; 1319 1320release_conn_id: 1321 cma_exch(conn_id, CMA_DESTROYING); 1322 mutex_unlock(&conn_id->handler_mutex); 1323 rdma_destroy_id(&conn_id->id); 1324 1325out: 1326 mutex_unlock(&listen_id->handler_mutex); 1327 return ret; 1328} 1329 1330static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr) 1331{ 1332 return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr))); 1333} 1334 1335static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, 1336 struct ib_cm_compare_data *compare) 1337{ 1338 struct cma_hdr *cma_data, *cma_mask; 1339 struct sdp_hh *sdp_data, *sdp_mask; 1340 __be32 ip4_addr; 1341#ifdef INET6 1342 struct in6_addr ip6_addr; 1343#endif 1344 1345 memset(compare, 0, sizeof *compare); 1346 cma_data = (void *) compare->data; 1347 cma_mask = (void *) compare->mask; 1348 sdp_data = (void *) compare->data; 1349 sdp_mask = (void *) compare->mask; 1350 1351 switch (addr->sa_family) { 1352 case AF_INET: 1353 ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr; 1354 if (ps == RDMA_PS_SDP) { 1355 sdp_set_ip_ver(sdp_data, 4); 1356 sdp_set_ip_ver(sdp_mask, 0xF); 1357 sdp_data->dst_addr.ip4.addr = ip4_addr; 1358 sdp_mask->dst_addr.ip4.addr = htonl(~0); 1359 } else { 1360 cma_set_ip_ver(cma_data, 4); 1361 cma_set_ip_ver(cma_mask, 0xF); 1362 cma_data->dst_addr.ip4.addr = ip4_addr; 1363 cma_mask->dst_addr.ip4.addr = htonl(~0); 1364 } 1365 break; 1366#ifdef INET6 1367 case AF_INET6: 1368 ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr; 1369 if (ps == RDMA_PS_SDP) { 1370 sdp_set_ip_ver(sdp_data, 6); 1371 sdp_set_ip_ver(sdp_mask, 0xF); 1372 sdp_data->dst_addr.ip6 = ip6_addr; 1373 memset(&sdp_mask->dst_addr.ip6, 0xFF, 1374 sizeof sdp_mask->dst_addr.ip6); 1375 } else { 1376 cma_set_ip_ver(cma_data, 6); 1377 cma_set_ip_ver(cma_mask, 0xF); 1378 cma_data->dst_addr.ip6 = ip6_addr; 1379 memset(&cma_mask->dst_addr.ip6, 0xFF, 1380 sizeof cma_mask->dst_addr.ip6); 1381 } 1382 break; 1383#endif 1384 default: 1385 break; 1386 } 1387} 1388 1389static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 1390{ 1391 struct rdma_id_private *id_priv = iw_id->context; 1392 struct rdma_cm_event event; 1393 struct sockaddr_in *sin; 1394 int ret = 0; 1395 1396 if (cma_disable_callback(id_priv, CMA_CONNECT)) 1397 return 0; 1398 1399 memset(&event, 0, sizeof event); 1400 switch (iw_event->event) { 1401 case IW_CM_EVENT_CLOSE: 1402 event.event = RDMA_CM_EVENT_DISCONNECTED; 1403 break; 1404 case IW_CM_EVENT_CONNECT_REPLY: 1405 sin = 
(struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 1406 *sin = iw_event->local_addr; 1407 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; 1408 *sin = iw_event->remote_addr; 1409 switch ((int)iw_event->status) { 1410 case 0: 1411 event.event = RDMA_CM_EVENT_ESTABLISHED; 1412 break; 1413 case -ECONNRESET: 1414 case -ECONNREFUSED: 1415 event.event = RDMA_CM_EVENT_REJECTED; 1416 break; 1417 case -ETIMEDOUT: 1418 event.event = RDMA_CM_EVENT_UNREACHABLE; 1419 break; 1420 default: 1421 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 1422 break; 1423 } 1424 break; 1425 case IW_CM_EVENT_ESTABLISHED: 1426 event.event = RDMA_CM_EVENT_ESTABLISHED; 1427 break; 1428 default: 1429 BUG_ON(1); 1430 } 1431 1432 event.status = iw_event->status; 1433 event.param.conn.private_data = iw_event->private_data; 1434 event.param.conn.private_data_len = iw_event->private_data_len; 1435 ret = id_priv->id.event_handler(&id_priv->id, &event); 1436 if (ret) { 1437 /* Destroy the CM ID by returning a non-zero value. */ 1438 id_priv->cm_id.iw = NULL; 1439 cma_exch(id_priv, CMA_DESTROYING); 1440 mutex_unlock(&id_priv->handler_mutex); 1441 rdma_destroy_id(&id_priv->id); 1442 return ret; 1443 } 1444 1445 mutex_unlock(&id_priv->handler_mutex); 1446 return ret; 1447} 1448 1449static int iw_conn_req_handler(struct iw_cm_id *cm_id, 1450 struct iw_cm_event *iw_event) 1451{ 1452 struct rdma_cm_id *new_cm_id; 1453 struct rdma_id_private *listen_id, *conn_id; 1454 struct sockaddr_in *sin; 1455 struct net_device *dev = NULL; 1456 struct rdma_cm_event event; 1457 int ret; 1458 struct ib_device_attr attr; 1459 1460 listen_id = cm_id->context; 1461 if (cma_disable_callback(listen_id, CMA_LISTEN)) 1462 return -ECONNABORTED; 1463 1464 /* Create a new RDMA id for the new IW CM ID */ 1465 new_cm_id = rdma_create_id(listen_id->id.event_handler, 1466 listen_id->id.context, 1467 RDMA_PS_TCP); 1468 if (IS_ERR(new_cm_id)) { 1469 ret = -ENOMEM; 1470 goto out; 1471 } 1472 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 1473 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1474 conn_id->state = CMA_CONNECT; 1475 1476 dev = ip_dev_find(NULL, iw_event->local_addr.sin_addr.s_addr); 1477 if (!dev) { 1478 ret = -EADDRNOTAVAIL; 1479 mutex_unlock(&conn_id->handler_mutex); 1480 rdma_destroy_id(new_cm_id); 1481 goto out; 1482 } 1483 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL); 1484 if (ret) { 1485 mutex_unlock(&conn_id->handler_mutex); 1486 rdma_destroy_id(new_cm_id); 1487 goto out; 1488 } 1489 1490 mutex_lock(&lock); 1491 ret = cma_acquire_dev(conn_id); 1492 mutex_unlock(&lock); 1493 if (ret) { 1494 mutex_unlock(&conn_id->handler_mutex); 1495 rdma_destroy_id(new_cm_id); 1496 goto out; 1497 } 1498 1499 conn_id->cm_id.iw = cm_id; 1500 cm_id->context = conn_id; 1501 cm_id->cm_handler = cma_iw_handler; 1502 1503 sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr; 1504 *sin = iw_event->local_addr; 1505 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; 1506 *sin = iw_event->remote_addr; 1507 1508 ret = ib_query_device(conn_id->id.device, &attr); 1509 if (ret) { 1510 mutex_unlock(&conn_id->handler_mutex); 1511 rdma_destroy_id(new_cm_id); 1512 goto out; 1513 } 1514 1515 memset(&event, 0, sizeof event); 1516 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1517 event.param.conn.private_data = iw_event->private_data; 1518 event.param.conn.private_data_len = iw_event->private_data_len; 1519 event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; 1520 
event.param.conn.responder_resources = attr.max_qp_rd_atom; 1521 ret = conn_id->id.event_handler(&conn_id->id, &event); 1522 if (ret) { 1523 /* User wants to destroy the CM ID */ 1524 conn_id->cm_id.iw = NULL; 1525 cma_exch(conn_id, CMA_DESTROYING); 1526 mutex_unlock(&conn_id->handler_mutex); 1527 rdma_destroy_id(&conn_id->id); 1528 goto out; 1529 } 1530 1531 mutex_unlock(&conn_id->handler_mutex); 1532 1533out: 1534 if (dev) 1535 dev_put(dev); 1536 mutex_unlock(&listen_id->handler_mutex); 1537 return ret; 1538} 1539 1540static int cma_ib_listen(struct rdma_id_private *id_priv) 1541{ 1542 struct ib_cm_compare_data compare_data; 1543 struct sockaddr *addr; 1544 __be64 svc_id; 1545 int ret; 1546 1547 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler, 1548 id_priv); 1549 if (IS_ERR(id_priv->cm_id.ib)) 1550 return PTR_ERR(id_priv->cm_id.ib); 1551 1552 addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; 1553 svc_id = cma_get_service_id(id_priv->id.ps, addr); 1554 if (cma_any_addr(addr)) 1555 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL); 1556 else { 1557 cma_set_compare_data(id_priv->id.ps, addr, &compare_data); 1558 ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data); 1559 } 1560 1561 if (ret) { 1562 ib_destroy_cm_id(id_priv->cm_id.ib); 1563 id_priv->cm_id.ib = NULL; 1564 } 1565 1566 return ret; 1567} 1568 1569static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 1570{ 1571 int ret; 1572 struct sockaddr_in *sin; 1573 1574 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device, 1575 id_priv->sock, 1576 iw_conn_req_handler, 1577 id_priv); 1578 if (IS_ERR(id_priv->cm_id.iw)) 1579 return PTR_ERR(id_priv->cm_id.iw); 1580 1581 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 1582 id_priv->cm_id.iw->local_addr = *sin; 1583 1584 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 1585 1586 if (ret) { 1587 iw_destroy_cm_id(id_priv->cm_id.iw); 1588 id_priv->cm_id.iw = NULL; 1589 } 1590 1591 return ret; 1592} 1593 1594static int cma_listen_handler(struct rdma_cm_id *id, 1595 struct rdma_cm_event *event) 1596{ 1597 struct rdma_id_private *id_priv = id->context; 1598 1599 id->context = id_priv->id.context; 1600 id->event_handler = id_priv->id.event_handler; 1601 return id_priv->id.event_handler(id, event); 1602} 1603 1604static void cma_listen_on_dev(struct rdma_id_private *id_priv, 1605 struct cma_device *cma_dev) 1606{ 1607 struct rdma_id_private *dev_id_priv; 1608 struct rdma_cm_id *id; 1609 int ret; 1610 1611 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); 1612 if (IS_ERR(id)) 1613 return; 1614 1615 dev_id_priv = container_of(id, struct rdma_id_private, id); 1616 1617 dev_id_priv->state = CMA_ADDR_BOUND; 1618 dev_id_priv->sock = id_priv->sock; 1619 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, 1620 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); 1621 1622 cma_attach_to_dev(dev_id_priv, cma_dev); 1623 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 1624 atomic_inc(&id_priv->refcount); 1625 dev_id_priv->internal_id = 1; 1626 1627 ret = rdma_listen(id, id_priv->backlog); 1628 if (ret) 1629 printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, " 1630 "listening on device %s\n", ret, cma_dev->device->name); 1631} 1632 1633static void cma_listen_on_all(struct rdma_id_private *id_priv) 1634{ 1635 struct cma_device *cma_dev; 1636 1637 mutex_lock(&lock); 1638 list_add_tail(&id_priv->list, &listen_any_list); 1639 list_for_each_entry(cma_dev, &dev_list, 
list) 1640 cma_listen_on_dev(id_priv, cma_dev); 1641 mutex_unlock(&lock); 1642} 1643 1644int rdma_listen(struct rdma_cm_id *id, int backlog) 1645{ 1646 struct rdma_id_private *id_priv; 1647 int ret; 1648 1649 id_priv = container_of(id, struct rdma_id_private, id); 1650 if (id_priv->state == CMA_IDLE) { 1651 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; 1652 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); 1653 if (ret) 1654 return ret; 1655 } 1656 1657 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) 1658 return -EINVAL; 1659 1660 id_priv->backlog = backlog; 1661 if (id->device) { 1662 if (rdma_cap_ib_cm(id->device, 1)) { 1663 ret = cma_ib_listen(id_priv); 1664 if (ret) 1665 goto err; 1666 } else if (rdma_cap_iw_cm(id->device, 1)) { 1667 ret = cma_iw_listen(id_priv, backlog); 1668 if (ret) 1669 goto err; 1670 } else { 1671 ret = -ENOSYS; 1672 goto err; 1673 } 1674 } else 1675 cma_listen_on_all(id_priv); 1676 1677 return 0; 1678err: 1679 id_priv->backlog = 0; 1680 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); 1681 return ret; 1682} 1683EXPORT_SYMBOL(rdma_listen); 1684 1685void rdma_set_service_type(struct rdma_cm_id *id, int tos) 1686{ 1687 struct rdma_id_private *id_priv; 1688 1689 id_priv = container_of(id, struct rdma_id_private, id); 1690 id_priv->tos = (u8) tos; 1691} 1692EXPORT_SYMBOL(rdma_set_service_type); 1693 1694static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, 1695 void *context) 1696{ 1697 struct cma_work *work = context; 1698 struct rdma_route *route; 1699 1700 route = &work->id->id.route; 1701 1702 if (!status) { 1703 route->num_paths = 1; 1704 *route->path_rec = *path_rec; 1705 } else { 1706 work->old_state = CMA_ROUTE_QUERY; 1707 work->new_state = CMA_ADDR_RESOLVED; 1708 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 1709 work->event.status = status; 1710 } 1711 1712 queue_work(cma_wq, &work->work); 1713} 1714 1715static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 1716 struct cma_work *work) 1717{ 1718 struct rdma_addr *addr = &id_priv->id.route.addr; 1719 struct ib_sa_path_rec path_rec; 1720 ib_sa_comp_mask comp_mask; 1721 struct sockaddr_in6 *sin6; 1722 1723 memset(&path_rec, 0, sizeof path_rec); 1724 rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid); 1725 rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid); 1726 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr)); 1727 path_rec.numb_path = 1; 1728 path_rec.reversible = 1; 1729 path_rec.service_id = cma_get_service_id(id_priv->id.ps, 1730 (struct sockaddr *) &addr->dst_addr); 1731 1732 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 1733 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 1734 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 1735 1736 if (addr->src_addr.ss_family == AF_INET) { 1737 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 1738 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 1739 } else { 1740 sin6 = (struct sockaddr_in6 *) &addr->src_addr; 1741 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 1742 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 1743 } 1744 1745 if (tavor_quirk) { 1746 path_rec.mtu_selector = IB_SA_LT; 1747 path_rec.mtu = IB_MTU_2048; 1748 } 1749 1750 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 1751 id_priv->id.port_num, &path_rec, 1752 comp_mask, timeout_ms, 1753 GFP_KERNEL, cma_query_handler, 1754 work, &id_priv->query); 1755 1756 return (id_priv->query_id < 0) ? 
id_priv->query_id : 0; 1757} 1758 1759static void cma_work_handler(struct work_struct *_work) 1760{ 1761 struct cma_work *work = container_of(_work, struct cma_work, work); 1762 struct rdma_id_private *id_priv = work->id; 1763 int destroy = 0; 1764 1765 mutex_lock(&id_priv->handler_mutex); 1766 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 1767 goto out; 1768 1769 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1770 cma_exch(id_priv, CMA_DESTROYING); 1771 destroy = 1; 1772 } 1773out: 1774 mutex_unlock(&id_priv->handler_mutex); 1775 cma_deref_id(id_priv); 1776 if (destroy) 1777 rdma_destroy_id(&id_priv->id); 1778 kfree(work); 1779} 1780 1781static void cma_ndev_work_handler(struct work_struct *_work) 1782{ 1783 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); 1784 struct rdma_id_private *id_priv = work->id; 1785 int destroy = 0; 1786 1787 mutex_lock(&id_priv->handler_mutex); 1788 if (id_priv->state == CMA_DESTROYING || 1789 id_priv->state == CMA_DEVICE_REMOVAL) 1790 goto out; 1791 1792 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1793 cma_exch(id_priv, CMA_DESTROYING); 1794 destroy = 1; 1795 } 1796 1797out: 1798 mutex_unlock(&id_priv->handler_mutex); 1799 cma_deref_id(id_priv); 1800 if (destroy) 1801 rdma_destroy_id(&id_priv->id); 1802 kfree(work); 1803} 1804 1805static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 1806{ 1807 struct rdma_route *route = &id_priv->id.route; 1808 struct cma_work *work; 1809 int ret; 1810 1811 work = kzalloc(sizeof *work, GFP_KERNEL); 1812 if (!work) 1813 return -ENOMEM; 1814 1815 work->id = id_priv; 1816 INIT_WORK(&work->work, cma_work_handler); 1817 work->old_state = CMA_ROUTE_QUERY; 1818 work->new_state = CMA_ROUTE_RESOLVED; 1819 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1820 1821 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 1822 if (!route->path_rec) { 1823 ret = -ENOMEM; 1824 goto err1; 1825 } 1826 1827 ret = cma_query_ib_route(id_priv, timeout_ms, work); 1828 if (ret) 1829 goto err2; 1830 1831 return 0; 1832err2: 1833 kfree(route->path_rec); 1834 route->path_rec = NULL; 1835err1: 1836 kfree(work); 1837 return ret; 1838} 1839 1840int rdma_set_ib_paths(struct rdma_cm_id *id, 1841 struct ib_sa_path_rec *path_rec, int num_paths) 1842{ 1843 struct rdma_id_private *id_priv; 1844 int ret; 1845 1846 id_priv = container_of(id, struct rdma_id_private, id); 1847 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) 1848 return -EINVAL; 1849 1850 id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL); 1851 if (!id->route.path_rec) { 1852 ret = -ENOMEM; 1853 goto err; 1854 } 1855 1856 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths); 1857 return 0; 1858err: 1859 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); 1860 return ret; 1861} 1862EXPORT_SYMBOL(rdma_set_ib_paths); 1863 1864static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) 1865{ 1866 struct cma_work *work; 1867 1868 work = kzalloc(sizeof *work, GFP_KERNEL); 1869 if (!work) 1870 return -ENOMEM; 1871 1872 work->id = id_priv; 1873 INIT_WORK(&work->work, cma_work_handler); 1874 work->old_state = CMA_ROUTE_QUERY; 1875 work->new_state = CMA_ROUTE_RESOLVED; 1876 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1877 queue_work(cma_wq, &work->work); 1878 return 0; 1879} 1880 1881static u8 tos_to_sl(u8 tos) 1882{ 1883 return def_prec2sl & 7; 1884} 1885 1886static int cma_resolve_iboe_route(struct 
rdma_id_private *id_priv) 1887{ 1888 struct rdma_route *route = &id_priv->id.route; 1889 struct rdma_addr *addr = &route->addr; 1890 struct cma_work *work; 1891 int ret; 1892 struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr; 1893 struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr; 1894 struct net_device *ndev = NULL; 1895 u16 vid; 1896 1897 if (src_addr->sin_family != dst_addr->sin_family) 1898 return -EINVAL; 1899 1900 work = kzalloc(sizeof *work, GFP_KERNEL); 1901 if (!work) 1902 return -ENOMEM; 1903 1904 work->id = id_priv; 1905 INIT_WORK(&work->work, cma_work_handler); 1906 1907 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 1908 if (!route->path_rec) { 1909 ret = -ENOMEM; 1910 goto err1; 1911 } 1912 1913 route->num_paths = 1; 1914 1915 if (addr->dev_addr.bound_dev_if) 1916 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 1917 if (!ndev) { 1918 ret = -ENODEV; 1919 goto err2; 1920 } 1921 1922 vid = rdma_vlan_dev_vlan_id(ndev); 1923 1924 iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid); 1925 iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid); 1926 1927 route->path_rec->hop_limit = 1; 1928 route->path_rec->reversible = 1; 1929 route->path_rec->pkey = cpu_to_be16(0xffff); 1930 route->path_rec->mtu_selector = IB_SA_EQ; 1931 route->path_rec->sl = tos_to_sl(id_priv->tos); 1932 1933#ifdef __linux__ 1934 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 1935#else 1936 route->path_rec->mtu = iboe_get_mtu(ndev->if_mtu); 1937#endif 1938 route->path_rec->rate_selector = IB_SA_EQ; 1939 route->path_rec->rate = iboe_get_rate(ndev); 1940 dev_put(ndev); 1941 route->path_rec->packet_life_time_selector = IB_SA_EQ; 1942 route->path_rec->packet_life_time = IBOE_PACKET_LIFETIME; 1943 if (!route->path_rec->mtu) { 1944 ret = -EINVAL; 1945 goto err2; 1946 } 1947 1948 work->old_state = CMA_ROUTE_QUERY; 1949 work->new_state = CMA_ROUTE_RESOLVED; 1950 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1951 work->event.status = 0; 1952 1953 queue_work(cma_wq, &work->work); 1954 1955 return 0; 1956 1957err2: 1958 kfree(route->path_rec); 1959 route->path_rec = NULL; 1960err1: 1961 kfree(work); 1962 return ret; 1963} 1964 1965int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 1966{ 1967 struct rdma_id_private *id_priv; 1968 int ret; 1969 1970 id_priv = container_of(id, struct rdma_id_private, id); 1971 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY)) 1972 return -EINVAL; 1973 1974 atomic_inc(&id_priv->refcount); 1975 if (rdma_cap_ib_sa(id->device, id->port_num)) 1976 ret = cma_resolve_ib_route(id_priv, timeout_ms); 1977 else if (rdma_protocol_roce(id->device, id->port_num)) 1978 ret = cma_resolve_iboe_route(id_priv); 1979 else if (rdma_protocol_iwarp(id->device, id->port_num)) 1980 ret = cma_resolve_iw_route(id_priv, timeout_ms); 1981 else 1982 ret = -ENOSYS; 1983 1984 if (ret) 1985 goto err; 1986 1987 return 0; 1988err: 1989 cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED); 1990 cma_deref_id(id_priv); 1991 return ret; 1992} 1993EXPORT_SYMBOL(rdma_resolve_route); 1994 1995static int cma_bind_loopback(struct rdma_id_private *id_priv) 1996{ 1997 struct cma_device *cma_dev; 1998 struct ib_port_attr port_attr; 1999 union ib_gid gid; 2000 u16 pkey; 2001 int ret; 2002 u8 p; 2003 2004 mutex_lock(&lock); 2005 if (list_empty(&dev_list)) { 2006 ret = -ENODEV; 2007 goto out; 2008 } 2009 list_for_each_entry(cma_dev, &dev_list, list) 2010 for (p = 1; p <= 
cma_dev->device->phys_port_cnt; ++p) 2011 if (!ib_query_port(cma_dev->device, p, &port_attr) && 2012 port_attr.state == IB_PORT_ACTIVE) 2013 goto port_found; 2014 2015 p = 1; 2016 cma_dev = list_entry(dev_list.next, struct cma_device, list); 2017 2018port_found: 2019 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid); 2020 if (ret) 2021 goto out; 2022 2023 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 2024 if (ret) 2025 goto out; 2026 2027 id_priv->id.route.addr.dev_addr.dev_type = 2028 (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ? 2029 ARPHRD_INFINIBAND : ARPHRD_ETHER; 2030 2031 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2032 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 2033 id_priv->id.port_num = p; 2034 cma_attach_to_dev(id_priv, cma_dev); 2035out: 2036 mutex_unlock(&lock); 2037 return ret; 2038} 2039 2040static void addr_handler(int status, struct sockaddr *src_addr, 2041 struct rdma_dev_addr *dev_addr, void *context) 2042{ 2043 struct rdma_id_private *id_priv = context; 2044 struct rdma_cm_event event; 2045 2046 memset(&event, 0, sizeof event); 2047 mutex_lock(&id_priv->handler_mutex); 2048 2049 /* 2050 * Grab mutex to block rdma_destroy_id() from removing the device while 2051 * we're trying to acquire it. 2052 */ 2053 mutex_lock(&lock); 2054 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) { 2055 mutex_unlock(&lock); 2056 goto out; 2057 } 2058 2059 if (!status && !id_priv->cma_dev) 2060 status = cma_acquire_dev(id_priv); 2061 mutex_unlock(&lock); 2062 2063 if (status) { 2064 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) 2065 goto out; 2066 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2067 event.status = status; 2068 } else { 2069 memcpy(&id_priv->id.route.addr.src_addr, src_addr, 2070 ip_addr_size(src_addr)); 2071 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2072 } 2073 2074 if (id_priv->id.event_handler(&id_priv->id, &event)) { 2075 cma_exch(id_priv, CMA_DESTROYING); 2076 mutex_unlock(&id_priv->handler_mutex); 2077 cma_deref_id(id_priv); 2078 rdma_destroy_id(&id_priv->id); 2079 return; 2080 } 2081out: 2082 mutex_unlock(&id_priv->handler_mutex); 2083 cma_deref_id(id_priv); 2084} 2085 2086static int cma_resolve_loopback(struct rdma_id_private *id_priv) 2087{ 2088 struct cma_work *work; 2089 struct sockaddr *src, *dst; 2090 union ib_gid gid; 2091 int ret; 2092 2093 work = kzalloc(sizeof *work, GFP_KERNEL); 2094 if (!work) 2095 return -ENOMEM; 2096 2097 if (!id_priv->cma_dev) { 2098 ret = cma_bind_loopback(id_priv); 2099 if (ret) 2100 goto err; 2101 } 2102 2103 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 2104 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 2105 2106 src = (struct sockaddr *) &id_priv->id.route.addr.src_addr; 2107 if (cma_zero_addr(src)) { 2108 dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr; 2109 if ((src->sa_family = dst->sa_family) == AF_INET) { 2110 ((struct sockaddr_in *) src)->sin_addr.s_addr = 2111 ((struct sockaddr_in *) dst)->sin_addr.s_addr; 2112 } else { 2113 ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr, 2114 &((struct sockaddr_in6 *) dst)->sin6_addr); 2115 } 2116 } 2117 2118 work->id = id_priv; 2119 INIT_WORK(&work->work, cma_work_handler); 2120 work->old_state = CMA_ADDR_QUERY; 2121 work->new_state = CMA_ADDR_RESOLVED; 2122 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2123 queue_work(cma_wq, &work->work); 2124 return 0; 2125err: 2126 kfree(work); 2127 return ret; 2128} 2129 2130static int 
cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2131 struct sockaddr *dst_addr) 2132{ 2133 if (!src_addr || !src_addr->sa_family) { 2134 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2135 if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) { 2136 ((struct sockaddr_in6 *) src_addr)->sin6_scope_id = 2137 ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id; 2138 } 2139 } 2140 if (!cma_any_addr(src_addr)) 2141 return rdma_bind_addr(id, src_addr); 2142 else { 2143 struct sockaddr_in addr_in; 2144 2145 memset(&addr_in, 0, sizeof addr_in); 2146 addr_in.sin_family = dst_addr->sa_family; 2147 addr_in.sin_len = sizeof addr_in; 2148 return rdma_bind_addr(id, (struct sockaddr *) &addr_in); 2149 } 2150} 2151 2152int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 2153 struct sockaddr *dst_addr, int timeout_ms) 2154{ 2155 struct rdma_id_private *id_priv; 2156 int ret; 2157 2158 id_priv = container_of(id, struct rdma_id_private, id); 2159 if (id_priv->state == CMA_IDLE) { 2160 ret = cma_bind_addr(id, src_addr, dst_addr); 2161 if (ret) 2162 return ret; 2163 } 2164 2165 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY)) 2166 return -EINVAL; 2167 2168 atomic_inc(&id_priv->refcount); 2169 memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr)); 2170 if (cma_any_addr(dst_addr)) 2171 ret = cma_resolve_loopback(id_priv); 2172 else 2173 ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr, 2174 dst_addr, &id->route.addr.dev_addr, 2175 timeout_ms, addr_handler, id_priv); 2176 if (ret) 2177 goto err; 2178 2179 return 0; 2180err: 2181 cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND); 2182 cma_deref_id(id_priv); 2183 return ret; 2184} 2185EXPORT_SYMBOL(rdma_resolve_addr); 2186 2187static void cma_bind_port(struct rdma_bind_list *bind_list, 2188 struct rdma_id_private *id_priv) 2189{ 2190 struct sockaddr_in *sin; 2191 2192 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 2193 sin->sin_port = htons(bind_list->port); 2194 id_priv->bind_list = bind_list; 2195 hlist_add_head(&id_priv->node, &bind_list->owners); 2196} 2197 2198static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, 2199 unsigned short snum) 2200{ 2201 struct rdma_bind_list *bind_list; 2202 int port, ret; 2203 2204 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2205 if (!bind_list) 2206 return -ENOMEM; 2207 2208 do { 2209 ret = idr_get_new_above(ps, bind_list, snum, &port); 2210 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 2211 2212 if (ret) 2213 goto err1; 2214 2215 if (port != snum) { 2216 ret = -EADDRNOTAVAIL; 2217 goto err2; 2218 } 2219 2220 bind_list->ps = ps; 2221 bind_list->port = (unsigned short) port; 2222 cma_bind_port(bind_list, id_priv); 2223 return 0; 2224err2: 2225 idr_remove(ps, port); 2226err1: 2227 kfree(bind_list); 2228 return ret; 2229} 2230 2231static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) 2232{ 2233#if defined(INET) 2234 struct rdma_bind_list *bind_list; 2235 int port, ret, low, high; 2236 2237 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 2238 if (!bind_list) 2239 return -ENOMEM; 2240 2241retry: 2242 /* FIXME: add proper port randomization per like inet_csk_get_port */ 2243 do { 2244 ret = idr_get_new_above(ps, bind_list, next_port, &port); 2245 } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); 2246 2247 if (ret) 2248 goto err1; 2249 2250 inet_get_local_port_range(&low, &high); 2251 if (port > high) { 2252 if (next_port != low) { 
2253 idr_remove(ps, port); 2254 next_port = low; 2255 goto retry; 2256 } 2257 ret = -EADDRNOTAVAIL; 2258 goto err2; 2259 } 2260 2261 if (port == high) 2262 next_port = low; 2263 else 2264 next_port = port + 1; 2265 2266 bind_list->ps = ps; 2267 bind_list->port = (unsigned short) port; 2268 cma_bind_port(bind_list, id_priv); 2269 return 0; 2270err2: 2271 idr_remove(ps, port); 2272err1: 2273 kfree(bind_list); 2274 return ret; 2275#else 2276 return -ENOSPC; 2277#endif 2278} 2279 2280static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) 2281{ 2282 struct rdma_id_private *cur_id; 2283 struct sockaddr_in *sin, *cur_sin; 2284 struct rdma_bind_list *bind_list; 2285 struct hlist_node *node; 2286 unsigned short snum; 2287 2288 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; 2289 snum = ntohs(sin->sin_port); 2290#ifdef __linux__ 2291 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 2292 return -EACCES; 2293#endif 2294 2295 bind_list = idr_find(ps, snum); 2296 if (!bind_list) 2297 return cma_alloc_port(ps, id_priv, snum); 2298 2299 /* 2300 * We don't support binding to any address if anyone is bound to 2301 * a specific address on the same port. 2302 */ 2303 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) 2304 return -EADDRNOTAVAIL; 2305 2306 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { 2307 if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr)) 2308 return -EADDRNOTAVAIL; 2309 2310 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; 2311 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) 2312 return -EADDRINUSE; 2313 } 2314 2315 cma_bind_port(bind_list, id_priv); 2316 return 0; 2317} 2318 2319static int cma_get_tcp_port(struct rdma_id_private *id_priv) 2320{ 2321 int ret; 2322 int size; 2323 struct socket *sock; 2324 2325 ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); 2326 if (ret) 2327 return ret; 2328#ifdef __linux__ 2329 ret = sock->ops->bind(sock, 2330 (struct sockaddr *) &id_priv->id.route.addr.src_addr, 2331 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); 2332#else 2333 SOCK_LOCK(sock); 2334 sock->so_options |= SO_REUSEADDR; 2335 SOCK_UNLOCK(sock); 2336 2337 ret = -sobind(sock, 2338 (struct sockaddr *)&id_priv->id.route.addr.src_addr, 2339 curthread); 2340#endif 2341 if (ret) { 2342 sock_release(sock); 2343 return ret; 2344 } 2345 2346 size = ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr); 2347 ret = sock_getname(sock, 2348 (struct sockaddr *) &id_priv->id.route.addr.src_addr, 2349 &size, 0); 2350 if (ret) { 2351 sock_release(sock); 2352 return ret; 2353 } 2354 2355 id_priv->sock = sock; 2356 return 0; 2357} 2358 2359static int cma_get_port(struct rdma_id_private *id_priv) 2360{ 2361 struct cma_device *cma_dev; 2362 struct idr *ps; 2363 int ret; 2364 2365 switch (id_priv->id.ps) { 2366 case RDMA_PS_SDP: 2367 ps = &sdp_ps; 2368 break; 2369 case RDMA_PS_TCP: 2370 ps = &tcp_ps; 2371 2372 mutex_lock(&lock); 2373 /* check if there are any iWarp IB devices present */ 2374 list_for_each_entry(cma_dev, &dev_list, list) { 2375 if (rdma_protocol_iwarp(cma_dev->device, 1)) { 2376 id_priv->unify_ps_tcp = 1; 2377 break; 2378 } 2379 } 2380 mutex_unlock(&lock); 2381 2382 if (id_priv->unify_ps_tcp) { 2383 ret = cma_get_tcp_port(id_priv); 2384 if (ret) 2385 goto out; 2386 } 2387 break; 2388 case RDMA_PS_UDP: 2389 ps = &udp_ps; 2390 break; 2391 case RDMA_PS_IPOIB: 2392 ps = &ipoib_ps; 2393 break; 2394 default: 2395 return -EPROTONOSUPPORT; 
2396 } 2397 2398 mutex_lock(&lock); 2399 if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)) 2400 ret = cma_alloc_any_port(ps, id_priv); 2401 else 2402 ret = cma_use_port(ps, id_priv); 2403 mutex_unlock(&lock); 2404out: 2405 return ret; 2406} 2407 2408static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 2409 struct sockaddr *addr) 2410{ 2411#if defined(INET6) 2412 struct sockaddr_in6 *sin6; 2413 2414 if (addr->sa_family != AF_INET6) 2415 return 0; 2416 2417 sin6 = (struct sockaddr_in6 *) addr; 2418#ifdef __linux__ 2419 if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 2420#else 2421 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) && 2422#endif 2423 !sin6->sin6_scope_id) 2424 return -EINVAL; 2425 2426 dev_addr->bound_dev_if = sin6->sin6_scope_id; 2427#endif 2428 return 0; 2429} 2430 2431int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 2432{ 2433 struct rdma_id_private *id_priv; 2434 int ret; 2435 2436 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6) 2437 return -EAFNOSUPPORT; 2438 2439 id_priv = container_of(id, struct rdma_id_private, id); 2440 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) 2441 return -EINVAL; 2442 2443 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 2444 if (ret) 2445 goto err1; 2446 2447 if (!cma_any_addr(addr)) { 2448 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2449 if (ret) 2450 goto err1; 2451 2452 mutex_lock(&lock); 2453 ret = cma_acquire_dev(id_priv); 2454 mutex_unlock(&lock); 2455 if (ret) 2456 goto err1; 2457 } 2458 2459 memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr)); 2460 ret = cma_get_port(id_priv); 2461 if (ret) 2462 goto err2; 2463 2464 return 0; 2465err2: 2466 if (id_priv->cma_dev) { 2467 mutex_lock(&lock); 2468 cma_detach_from_dev(id_priv); 2469 mutex_unlock(&lock); 2470 } 2471err1: 2472 cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); 2473 return ret; 2474} 2475EXPORT_SYMBOL(rdma_bind_addr); 2476 2477static int cma_format_hdr(void *hdr, enum rdma_port_space ps, 2478 struct rdma_route *route) 2479{ 2480 struct cma_hdr *cma_hdr; 2481 struct sdp_hh *sdp_hdr; 2482 2483 if (route->addr.src_addr.ss_family == AF_INET) { 2484 struct sockaddr_in *src4, *dst4; 2485 2486 src4 = (struct sockaddr_in *) &route->addr.src_addr; 2487 dst4 = (struct sockaddr_in *) &route->addr.dst_addr; 2488 2489 switch (ps) { 2490 case RDMA_PS_SDP: 2491 sdp_hdr = hdr; 2492 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) 2493 return -EINVAL; 2494 sdp_set_ip_ver(sdp_hdr, 4); 2495 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2496 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2497 sdp_hdr->port = src4->sin_port; 2498 break; 2499 default: 2500 cma_hdr = hdr; 2501 cma_hdr->cma_version = CMA_VERSION; 2502 cma_set_ip_ver(cma_hdr, 4); 2503 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 2504 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 2505 cma_hdr->port = src4->sin_port; 2506 break; 2507 } 2508 } else { 2509 struct sockaddr_in6 *src6, *dst6; 2510 2511 src6 = (struct sockaddr_in6 *) &route->addr.src_addr; 2512 dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr; 2513 2514 switch (ps) { 2515 case RDMA_PS_SDP: 2516 sdp_hdr = hdr; 2517 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION) 2518 return -EINVAL; 2519 sdp_set_ip_ver(sdp_hdr, 6); 2520 sdp_hdr->src_addr.ip6 = src6->sin6_addr; 2521 sdp_hdr->dst_addr.ip6 = dst6->sin6_addr; 2522 sdp_hdr->port = src6->sin6_port; 2523 break; 2524 default: 2525 cma_hdr = hdr; 2526 cma_hdr->cma_version = 
CMA_VERSION; 2527 cma_set_ip_ver(cma_hdr, 6); 2528 cma_hdr->src_addr.ip6 = src6->sin6_addr; 2529 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 2530 cma_hdr->port = src6->sin6_port; 2531 break; 2532 } 2533 } 2534 return 0; 2535} 2536 2537static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 2538 struct ib_cm_event *ib_event) 2539{ 2540 struct rdma_id_private *id_priv = cm_id->context; 2541 struct rdma_cm_event event; 2542 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 2543 int ret = 0; 2544 2545 if (cma_disable_callback(id_priv, CMA_CONNECT)) 2546 return 0; 2547 2548 memset(&event, 0, sizeof event); 2549 switch (ib_event->event) { 2550 case IB_CM_SIDR_REQ_ERROR: 2551 event.event = RDMA_CM_EVENT_UNREACHABLE; 2552 event.status = -ETIMEDOUT; 2553 break; 2554 case IB_CM_SIDR_REP_RECEIVED: 2555 event.param.ud.private_data = ib_event->private_data; 2556 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 2557 if (rep->status != IB_SIDR_SUCCESS) { 2558 event.event = RDMA_CM_EVENT_UNREACHABLE; 2559 event.status = ib_event->param.sidr_rep_rcvd.status; 2560 break; 2561 } 2562 ret = cma_set_qkey(id_priv); 2563 if (ret) { 2564 event.event = RDMA_CM_EVENT_ADDR_ERROR; 2565 event.status = -EINVAL; 2566 break; 2567 } 2568 if (id_priv->qkey != rep->qkey) { 2569 event.event = RDMA_CM_EVENT_UNREACHABLE; 2570 event.status = -EINVAL; 2571 break; 2572 } 2573 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 2574 id_priv->id.route.path_rec, 2575 &event.param.ud.ah_attr); 2576 event.param.ud.qp_num = rep->qpn; 2577 event.param.ud.qkey = rep->qkey; 2578 event.event = RDMA_CM_EVENT_ESTABLISHED; 2579 event.status = 0; 2580 break; 2581 default: 2582 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", 2583 ib_event->event); 2584 goto out; 2585 } 2586 2587 ret = id_priv->id.event_handler(&id_priv->id, &event); 2588 if (ret) { 2589 /* Destroy the CM ID by returning a non-zero value. 
*/ 2590 id_priv->cm_id.ib = NULL; 2591 cma_exch(id_priv, CMA_DESTROYING); 2592 mutex_unlock(&id_priv->handler_mutex); 2593 rdma_destroy_id(&id_priv->id); 2594 return ret; 2595 } 2596out: 2597 mutex_unlock(&id_priv->handler_mutex); 2598 return ret; 2599} 2600 2601static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 2602 struct rdma_conn_param *conn_param) 2603{ 2604 struct ib_cm_sidr_req_param req; 2605 struct rdma_route *route; 2606 int ret; 2607 2608 req.private_data_len = sizeof(struct cma_hdr) + 2609 conn_param->private_data_len; 2610 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2611 if (!req.private_data) 2612 return -ENOMEM; 2613 2614 if (conn_param->private_data && conn_param->private_data_len) 2615 memcpy((void *) req.private_data + sizeof(struct cma_hdr), 2616 conn_param->private_data, conn_param->private_data_len); 2617 2618 route = &id_priv->id.route; 2619 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route); 2620 if (ret) 2621 goto out; 2622 2623 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, 2624 cma_sidr_rep_handler, id_priv); 2625 if (IS_ERR(id_priv->cm_id.ib)) { 2626 ret = PTR_ERR(id_priv->cm_id.ib); 2627 goto out; 2628 } 2629 2630 req.path = route->path_rec; 2631 req.service_id = cma_get_service_id(id_priv->id.ps, 2632 (struct sockaddr *) &route->addr.dst_addr); 2633 req.timeout_ms = 1 << (cma_response_timeout - 8); 2634 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2635 2636 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 2637 if (ret) { 2638 ib_destroy_cm_id(id_priv->cm_id.ib); 2639 id_priv->cm_id.ib = NULL; 2640 } 2641out: 2642 kfree(req.private_data); 2643 return ret; 2644} 2645 2646static int cma_connect_ib(struct rdma_id_private *id_priv, 2647 struct rdma_conn_param *conn_param) 2648{ 2649 struct ib_cm_req_param req; 2650 struct rdma_route *route; 2651 void *private_data; 2652 int offset, ret; 2653 2654 memset(&req, 0, sizeof req); 2655 offset = cma_user_data_offset(id_priv->id.ps); 2656 req.private_data_len = offset + conn_param->private_data_len; 2657 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 2658 if (!private_data) 2659 return -ENOMEM; 2660 2661 if (conn_param->private_data && conn_param->private_data_len) 2662 memcpy(private_data + offset, conn_param->private_data, 2663 conn_param->private_data_len); 2664 2665 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler, 2666 id_priv); 2667 if (IS_ERR(id_priv->cm_id.ib)) { 2668 ret = PTR_ERR(id_priv->cm_id.ib); 2669 goto out; 2670 } 2671 2672 route = &id_priv->id.route; 2673 ret = cma_format_hdr(private_data, id_priv->id.ps, route); 2674 if (ret) 2675 goto out; 2676 req.private_data = private_data; 2677 2678 req.primary_path = &route->path_rec[0]; 2679 if (route->num_paths == 2) 2680 req.alternate_path = &route->path_rec[1]; 2681 2682 req.service_id = cma_get_service_id(id_priv->id.ps, 2683 (struct sockaddr *) &route->addr.dst_addr); 2684 req.qp_num = id_priv->qp_num; 2685 req.qp_type = IB_QPT_RC; 2686 req.starting_psn = id_priv->seq_num; 2687 req.responder_resources = conn_param->responder_resources; 2688 req.initiator_depth = conn_param->initiator_depth; 2689 req.flow_control = conn_param->flow_control; 2690 req.retry_count = conn_param->retry_count; 2691 req.rnr_retry_count = conn_param->rnr_retry_count; 2692 req.remote_cm_response_timeout = cma_response_timeout; 2693 req.local_cm_response_timeout = cma_response_timeout; 2694 req.max_cm_retries = CMA_MAX_CM_RETRIES; 2695 req.srq = id_priv->srq ? 
1 : 0; 2696 2697 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 2698out: 2699 if (ret && !IS_ERR(id_priv->cm_id.ib)) { 2700 ib_destroy_cm_id(id_priv->cm_id.ib); 2701 id_priv->cm_id.ib = NULL; 2702 } 2703 2704 kfree(private_data); 2705 return ret; 2706} 2707 2708static int cma_connect_iw(struct rdma_id_private *id_priv, 2709 struct rdma_conn_param *conn_param) 2710{ 2711 struct iw_cm_id *cm_id; 2712 struct sockaddr_in* sin; 2713 int ret; 2714 struct iw_cm_conn_param iw_param; 2715 2716 cm_id = iw_create_cm_id(id_priv->id.device, id_priv->sock, 2717 cma_iw_handler, id_priv); 2718 if (IS_ERR(cm_id)) { 2719 ret = PTR_ERR(cm_id); 2720 goto out; 2721 } 2722 2723 id_priv->cm_id.iw = cm_id; 2724 2725 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr; 2726 cm_id->local_addr = *sin; 2727 2728 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; 2729 cm_id->remote_addr = *sin; 2730 2731 ret = cma_modify_qp_rtr(id_priv, conn_param); 2732 if (ret) 2733 goto out; 2734 2735 iw_param.ord = conn_param->initiator_depth; 2736 iw_param.ird = conn_param->responder_resources; 2737 iw_param.private_data = conn_param->private_data; 2738 iw_param.private_data_len = conn_param->private_data_len; 2739 if (id_priv->id.qp) 2740 iw_param.qpn = id_priv->qp_num; 2741 else 2742 iw_param.qpn = conn_param->qp_num; 2743 ret = iw_cm_connect(cm_id, &iw_param); 2744out: 2745 if (ret && !IS_ERR(cm_id)) { 2746 iw_destroy_cm_id(cm_id); 2747 id_priv->cm_id.iw = NULL; 2748 } 2749 return ret; 2750} 2751 2752int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2753{ 2754 struct rdma_id_private *id_priv; 2755 int ret; 2756 2757 id_priv = container_of(id, struct rdma_id_private, id); 2758 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) 2759 return -EINVAL; 2760 2761 if (!id->qp) { 2762 id_priv->qp_num = conn_param->qp_num; 2763 id_priv->srq = conn_param->srq; 2764 } 2765 2766 if (rdma_cap_ib_cm(id->device, id->port_num)) { 2767 if (cma_is_ud_ps(id->ps)) 2768 ret = cma_resolve_ib_udp(id_priv, conn_param); 2769 else 2770 ret = cma_connect_ib(id_priv, conn_param); 2771 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 2772 ret = cma_connect_iw(id_priv, conn_param); 2773 else 2774 ret = -ENOSYS; 2775 if (ret) 2776 goto err; 2777 2778 return 0; 2779err: 2780 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED); 2781 return ret; 2782} 2783EXPORT_SYMBOL(rdma_connect); 2784 2785static int cma_accept_ib(struct rdma_id_private *id_priv, 2786 struct rdma_conn_param *conn_param) 2787{ 2788 struct ib_cm_rep_param rep; 2789 int ret; 2790 2791 ret = cma_modify_qp_rtr(id_priv, conn_param); 2792 if (ret) 2793 goto out; 2794 2795 ret = cma_modify_qp_rts(id_priv, conn_param); 2796 if (ret) 2797 goto out; 2798 2799 memset(&rep, 0, sizeof rep); 2800 rep.qp_num = id_priv->qp_num; 2801 rep.starting_psn = id_priv->seq_num; 2802 rep.private_data = conn_param->private_data; 2803 rep.private_data_len = conn_param->private_data_len; 2804 rep.responder_resources = conn_param->responder_resources; 2805 rep.initiator_depth = conn_param->initiator_depth; 2806 rep.failover_accepted = 0; 2807 rep.flow_control = conn_param->flow_control; 2808 rep.rnr_retry_count = conn_param->rnr_retry_count; 2809 rep.srq = id_priv->srq ? 
1 : 0; 2810 2811 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 2812out: 2813 return ret; 2814} 2815 2816static int cma_accept_iw(struct rdma_id_private *id_priv, 2817 struct rdma_conn_param *conn_param) 2818{ 2819 struct iw_cm_conn_param iw_param; 2820 int ret; 2821 2822 ret = cma_modify_qp_rtr(id_priv, conn_param); 2823 if (ret) 2824 return ret; 2825 2826 iw_param.ord = conn_param->initiator_depth; 2827 iw_param.ird = conn_param->responder_resources; 2828 iw_param.private_data = conn_param->private_data; 2829 iw_param.private_data_len = conn_param->private_data_len; 2830 if (id_priv->id.qp) { 2831 iw_param.qpn = id_priv->qp_num; 2832 } else 2833 iw_param.qpn = conn_param->qp_num; 2834 2835 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 2836} 2837 2838static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 2839 enum ib_cm_sidr_status status, 2840 const void *private_data, int private_data_len) 2841{ 2842 struct ib_cm_sidr_rep_param rep; 2843 int ret; 2844 2845 memset(&rep, 0, sizeof rep); 2846 rep.status = status; 2847 if (status == IB_SIDR_SUCCESS) { 2848 ret = cma_set_qkey(id_priv); 2849 if (ret) 2850 return ret; 2851 rep.qp_num = id_priv->qp_num; 2852 rep.qkey = id_priv->qkey; 2853 } 2854 rep.private_data = private_data; 2855 rep.private_data_len = private_data_len; 2856 2857 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 2858} 2859 2860int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2861{ 2862 struct rdma_id_private *id_priv; 2863 int ret; 2864 2865 id_priv = container_of(id, struct rdma_id_private, id); 2866 if (!cma_comp(id_priv, CMA_CONNECT)) 2867 return -EINVAL; 2868 2869 if (!id->qp && conn_param) { 2870 id_priv->qp_num = conn_param->qp_num; 2871 id_priv->srq = conn_param->srq; 2872 } 2873 2874 if (rdma_cap_ib_cm(id->device, id->port_num)) { 2875 if (cma_is_ud_ps(id->ps)) 2876 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2877 conn_param->private_data, 2878 conn_param->private_data_len); 2879 else if (conn_param) 2880 ret = cma_accept_ib(id_priv, conn_param); 2881 else 2882 ret = cma_rep_recv(id_priv); 2883 } else if (rdma_cap_iw_cm(id->device, id->port_num)) 2884 ret = cma_accept_iw(id_priv, conn_param); 2885 else 2886 ret = -ENOSYS; 2887 2888 if (ret) 2889 goto reject; 2890 2891 return 0; 2892reject: 2893 cma_modify_qp_err(id_priv); 2894 rdma_reject(id, NULL, 0); 2895 return ret; 2896} 2897EXPORT_SYMBOL(rdma_accept); 2898 2899int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 2900{ 2901 struct rdma_id_private *id_priv; 2902 int ret; 2903 2904 id_priv = container_of(id, struct rdma_id_private, id); 2905 if (!cma_has_cm_dev(id_priv)) 2906 return -EINVAL; 2907 2908 switch (id->device->node_type) { 2909 case RDMA_NODE_IB_CA: 2910 ret = ib_cm_notify(id_priv->cm_id.ib, event); 2911 break; 2912 default: 2913 ret = 0; 2914 break; 2915 } 2916 return ret; 2917} 2918EXPORT_SYMBOL(rdma_notify); 2919 2920int rdma_reject(struct rdma_cm_id *id, const void *private_data, 2921 u8 private_data_len) 2922{ 2923 struct rdma_id_private *id_priv; 2924 int ret; 2925 2926 id_priv = container_of(id, struct rdma_id_private, id); 2927 if (!cma_has_cm_dev(id_priv)) 2928 return -EINVAL; 2929 2930 if (rdma_cap_ib_cm(id->device, id->port_num)) { 2931 if (cma_is_ud_ps(id->ps)) 2932 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2933 private_data, private_data_len); 2934 else 2935 ret = ib_send_cm_rej(id_priv->cm_id.ib, 2936 IB_CM_REJ_CONSUMER_DEFINED, NULL, 2937 0, private_data, private_data_len); 2938 } else if (rdma_cap_iw_cm(id->device, 
id->port_num)) { 2939 ret = iw_cm_reject(id_priv->cm_id.iw, 2940 private_data, private_data_len); 2941 } else 2942 ret = -ENOSYS; 2943 2944 return ret; 2945} 2946EXPORT_SYMBOL(rdma_reject); 2947 2948int rdma_disconnect(struct rdma_cm_id *id) 2949{ 2950 struct rdma_id_private *id_priv; 2951 int ret; 2952 2953 id_priv = container_of(id, struct rdma_id_private, id); 2954 if (!cma_has_cm_dev(id_priv)) 2955 return -EINVAL; 2956 2957 if (rdma_cap_ib_cm(id->device, id->port_num)) { 2958 ret = cma_modify_qp_err(id_priv); 2959 if (ret) 2960 goto out; 2961 /* Initiate or respond to a disconnect. */ 2962 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 2963 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 2964 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 2965 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 2966 } else 2967 ret = -EINVAL; 2968 2969out: 2970 return ret; 2971} 2972EXPORT_SYMBOL(rdma_disconnect); 2973 2974static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 2975{ 2976 struct rdma_id_private *id_priv; 2977 struct cma_multicast *mc = multicast->context; 2978 struct rdma_cm_event event; 2979 int ret; 2980 2981 id_priv = mc->id_priv; 2982 if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && 2983 cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) 2984 return 0; 2985 2986 mutex_lock(&id_priv->qp_mutex); 2987 if (!status && id_priv->id.qp) 2988 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 2989 multicast->rec.mlid); 2990 mutex_unlock(&id_priv->qp_mutex); 2991 2992 memset(&event, 0, sizeof event); 2993 event.status = status; 2994 event.param.ud.private_data = mc->context; 2995 if (!status) { 2996 event.event = RDMA_CM_EVENT_MULTICAST_JOIN; 2997 ib_init_ah_from_mcmember(id_priv->id.device, 2998 id_priv->id.port_num, &multicast->rec, 2999 &event.param.ud.ah_attr); 3000 event.param.ud.qp_num = 0xFFFFFF; 3001 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); 3002 } else 3003 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 3004 3005 ret = id_priv->id.event_handler(&id_priv->id, &event); 3006 if (ret) { 3007 cma_exch(id_priv, CMA_DESTROYING); 3008 mutex_unlock(&id_priv->handler_mutex); 3009 rdma_destroy_id(&id_priv->id); 3010 return 0; 3011 } 3012 3013 mutex_unlock(&id_priv->handler_mutex); 3014 return 0; 3015} 3016 3017static void cma_set_mgid(struct rdma_id_private *id_priv, 3018 struct sockaddr *addr, union ib_gid *mgid) 3019{ 3020#if defined(INET) || defined(INET6) 3021 unsigned char mc_map[MAX_ADDR_LEN]; 3022 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3023#endif 3024#ifdef INET 3025 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 3026#endif 3027#ifdef INET6 3028 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 3029#endif 3030 3031 if (cma_any_addr(addr)) { 3032 memset(mgid, 0, sizeof *mgid); 3033#ifdef INET6 3034 } else if ((addr->sa_family == AF_INET6) && 3035 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 3036 0xFF10A01B)) { 3037 /* IPv6 address is an SA assigned MGID. 
*/ 3038 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3039 } else if (addr->sa_family == AF_INET6) { 3040 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 3041 if (id_priv->id.ps == RDMA_PS_UDP) 3042 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3043 *mgid = *(union ib_gid *) (mc_map + 4); 3044#endif 3045#ifdef INET 3046 } else { 3047 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 3048 if (id_priv->id.ps == RDMA_PS_UDP) 3049 mc_map[7] = 0x01; /* Use RDMA CM signature */ 3050 *mgid = *(union ib_gid *) (mc_map + 4); 3051#endif 3052 } 3053} 3054 3055static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3056 struct cma_multicast *mc) 3057{ 3058 struct ib_sa_mcmember_rec rec; 3059 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3060 ib_sa_comp_mask comp_mask; 3061 int ret; 3062 3063 ib_addr_get_mgid(dev_addr, &rec.mgid); 3064 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 3065 &rec.mgid, &rec); 3066 if (ret) 3067 return ret; 3068 3069 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 3070 if (id_priv->id.ps == RDMA_PS_UDP) 3071 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 3072 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3073 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3074 rec.join_state = 1; 3075 3076 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 3077 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 3078 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 3079 IB_SA_MCMEMBER_REC_FLOW_LABEL | 3080 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 3081 3082 if (id_priv->id.ps == RDMA_PS_IPOIB) 3083 comp_mask |= IB_SA_MCMEMBER_REC_RATE | 3084 IB_SA_MCMEMBER_REC_RATE_SELECTOR; 3085 3086 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, 3087 id_priv->id.port_num, &rec, 3088 comp_mask, GFP_KERNEL, 3089 cma_ib_mc_handler, mc); 3090 if (IS_ERR(mc->multicast.ib)) 3091 return PTR_ERR(mc->multicast.ib); 3092 3093 return 0; 3094} 3095 3096 3097static void iboe_mcast_work_handler(struct work_struct *work) 3098{ 3099 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); 3100 struct cma_multicast *mc = mw->mc; 3101 struct ib_sa_multicast *m = mc->multicast.ib; 3102 3103 mc->multicast.ib->context = mc; 3104 cma_ib_mc_handler(0, m); 3105 kref_put(&mc->mcref, release_mc); 3106 kfree(mw); 3107} 3108 3109static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid) 3110{ 3111 struct sockaddr_in *sin = (struct sockaddr_in *)addr; 3112 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 3113 3114 if (cma_any_addr(addr)) { 3115 memset(mgid, 0, sizeof *mgid); 3116 } else if (addr->sa_family == AF_INET6) 3117 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 3118 else { 3119 mgid->raw[0] = 0xff; 3120 mgid->raw[1] = 0x0e; 3121 mgid->raw[2] = 0; 3122 mgid->raw[3] = 0; 3123 mgid->raw[4] = 0; 3124 mgid->raw[5] = 0; 3125 mgid->raw[6] = 0; 3126 mgid->raw[7] = 0; 3127 mgid->raw[8] = 0; 3128 mgid->raw[9] = 0; 3129 mgid->raw[10] = 0xff; 3130 mgid->raw[11] = 0xff; 3131 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; 3132 } 3133} 3134 3135static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, 3136 struct cma_multicast *mc) 3137{ 3138 struct iboe_mcast_work *work; 3139 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3140 int err; 3141 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 3142 struct net_device *ndev = NULL; 3143 3144 if (cma_zero_addr((struct sockaddr *)&mc->addr)) 3145 return -EINVAL; 3146 3147 work = 
kzalloc(sizeof *work, GFP_KERNEL); 3148 if (!work) 3149 return -ENOMEM; 3150 3151 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); 3152 if (!mc->multicast.ib) { 3153 err = -ENOMEM; 3154 goto out1; 3155 } 3156 3157 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid); 3158 3159 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); 3160 if (id_priv->id.ps == RDMA_PS_UDP) 3161 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); 3162 3163 if (dev_addr->bound_dev_if) 3164 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); 3165 if (!ndev) { 3166 err = -ENODEV; 3167 goto out2; 3168 } 3169 3170 mc->multicast.ib->rec.rate = iboe_get_rate(ndev); 3171 mc->multicast.ib->rec.hop_limit = 1; 3172#ifdef __linux__ 3173 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu); 3174#else 3175 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->if_mtu); 3176#endif 3177 dev_put(ndev); 3178 if (!mc->multicast.ib->rec.mtu) { 3179 err = -EINVAL; 3180 goto out2; 3181 } 3182 iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid); 3183 work->id = id_priv; 3184 work->mc = mc; 3185 INIT_WORK(&work->work, iboe_mcast_work_handler); 3186 kref_get(&mc->mcref); 3187 queue_work(cma_wq, &work->work); 3188 3189 return 0; 3190 3191out2: 3192 kfree(mc->multicast.ib); 3193out1: 3194 kfree(work); 3195 return err; 3196} 3197 3198int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 3199 void *context) 3200{ 3201 struct rdma_id_private *id_priv; 3202 struct cma_multicast *mc; 3203 int ret; 3204 3205 id_priv = container_of(id, struct rdma_id_private, id); 3206 if (!cma_comp(id_priv, CMA_ADDR_BOUND) && 3207 !cma_comp(id_priv, CMA_ADDR_RESOLVED)) 3208 return -EINVAL; 3209 3210 mc = kmalloc(sizeof *mc, GFP_KERNEL); 3211 if (!mc) 3212 return -ENOMEM; 3213 3214 memcpy(&mc->addr, addr, ip_addr_size(addr)); 3215 mc->context = context; 3216 mc->id_priv = id_priv; 3217 3218 spin_lock(&id_priv->lock); 3219 list_add(&mc->list, &id_priv->mc_list); 3220 spin_unlock(&id_priv->lock); 3221 3222 switch (rdma_node_get_transport(id->device->node_type)) { 3223 case RDMA_TRANSPORT_IB: 3224 switch (rdma_port_get_link_layer(id->device, id->port_num)) { 3225 case IB_LINK_LAYER_INFINIBAND: 3226 ret = cma_join_ib_multicast(id_priv, mc); 3227 break; 3228 case IB_LINK_LAYER_ETHERNET: 3229 kref_init(&mc->mcref); 3230 ret = cma_iboe_join_multicast(id_priv, mc); 3231 break; 3232 default: 3233 ret = -EINVAL; 3234 } 3235 break; 3236 default: 3237 ret = -ENOSYS; 3238 break; 3239 } 3240 3241 if (ret) { 3242 spin_lock_irq(&id_priv->lock); 3243 list_del(&mc->list); 3244 spin_unlock_irq(&id_priv->lock); 3245 kfree(mc); 3246 } 3247 3248 return ret; 3249} 3250EXPORT_SYMBOL(rdma_join_multicast); 3251 3252void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 3253{ 3254 struct rdma_id_private *id_priv; 3255 struct cma_multicast *mc; 3256 3257 id_priv = container_of(id, struct rdma_id_private, id); 3258 spin_lock_irq(&id_priv->lock); 3259 list_for_each_entry(mc, &id_priv->mc_list, list) { 3260 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) { 3261 list_del(&mc->list); 3262 spin_unlock_irq(&id_priv->lock); 3263 3264 if (id->qp) 3265 ib_detach_mcast(id->qp, 3266 &mc->multicast.ib->rec.mgid, 3267 mc->multicast.ib->rec.mlid); 3268 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { 3269 switch (rdma_port_get_link_layer(id->device, id->port_num)) { 3270 case IB_LINK_LAYER_INFINIBAND: 3271 ib_sa_free_multicast(mc->multicast.ib); 3272 kfree(mc); 3273 break; 3274 case 
IB_LINK_LAYER_ETHERNET: 3275 kref_put(&mc->mcref, release_mc); 3276 break; 3277 default: 3278 break; 3279 } 3280 } 3281 return; 3282 } 3283 } 3284 spin_unlock_irq(&id_priv->lock); 3285} 3286EXPORT_SYMBOL(rdma_leave_multicast); 3287 3288static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) 3289{ 3290 struct rdma_dev_addr *dev_addr; 3291 struct cma_ndev_work *work; 3292 3293 dev_addr = &id_priv->id.route.addr.dev_addr; 3294 3295#ifdef __linux__ 3296 if ((dev_addr->bound_dev_if == ndev->ifindex) && 3297 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 3298 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", 3299 ndev->name, &id_priv->id); 3300#else 3301 if ((dev_addr->bound_dev_if == ndev->if_index) && 3302 memcmp(dev_addr->src_dev_addr, IF_LLADDR(ndev), ndev->if_addrlen)) { 3303 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", 3304 ndev->if_xname, &id_priv->id); 3305#endif 3306 work = kzalloc(sizeof *work, GFP_KERNEL); 3307 if (!work) 3308 return -ENOMEM; 3309 3310 INIT_WORK(&work->work, cma_ndev_work_handler); 3311 work->id = id_priv; 3312 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; 3313 atomic_inc(&id_priv->refcount); 3314 queue_work(cma_wq, &work->work); 3315 } 3316 3317 return 0; 3318} 3319 3320static int cma_netdev_callback(struct notifier_block *self, unsigned long event, 3321 void *ctx) 3322{ 3323 struct net_device *ndev = (struct net_device *)ctx; 3324 struct cma_device *cma_dev; 3325 struct rdma_id_private *id_priv; 3326 int ret = NOTIFY_DONE; 3327 3328#ifdef __linux__ 3329 if (dev_net(ndev) != &init_net) 3330 return NOTIFY_DONE; 3331 3332 if (event != NETDEV_BONDING_FAILOVER) 3333 return NOTIFY_DONE; 3334 3335 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING)) 3336 return NOTIFY_DONE; 3337#else 3338 if (event != NETDEV_DOWN && event != NETDEV_UNREGISTER) 3339 return NOTIFY_DONE; 3340#endif 3341 3342 mutex_lock(&lock); 3343 list_for_each_entry(cma_dev, &dev_list, list) 3344 list_for_each_entry(id_priv, &cma_dev->id_list, list) { 3345 ret = cma_netdev_change(ndev, id_priv); 3346 if (ret) 3347 goto out; 3348 } 3349 3350out: 3351 mutex_unlock(&lock); 3352 return ret; 3353} 3354 3355static struct notifier_block cma_nb = { 3356 .notifier_call = cma_netdev_callback 3357}; 3358 3359static void cma_add_one(struct ib_device *device) 3360{ 3361 struct cma_device *cma_dev; 3362 struct rdma_id_private *id_priv; 3363 3364 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); 3365 if (!cma_dev) 3366 return; 3367 3368 cma_dev->device = device; 3369 3370 init_completion(&cma_dev->comp); 3371 atomic_set(&cma_dev->refcount, 1); 3372 INIT_LIST_HEAD(&cma_dev->id_list); 3373 ib_set_client_data(device, &cma_client, cma_dev); 3374 3375 mutex_lock(&lock); 3376 list_add_tail(&cma_dev->list, &dev_list); 3377 list_for_each_entry(id_priv, &listen_any_list, list) 3378 cma_listen_on_dev(id_priv, cma_dev); 3379 mutex_unlock(&lock); 3380} 3381 3382static int cma_remove_id_dev(struct rdma_id_private *id_priv) 3383{ 3384 struct rdma_cm_event event; 3385 enum cma_state state; 3386 int ret = 0; 3387 3388 /* Record that we want to remove the device */ 3389 state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); 3390 if (state == CMA_DESTROYING) 3391 return 0; 3392 3393 cma_cancel_operation(id_priv, state); 3394 mutex_lock(&id_priv->handler_mutex); 3395 3396 /* Check for destruction from another callback. 
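A minimal sketch, for orientation only, of how a kernel consumer might drive the connection state machine implemented above through the exported entry points rdma_resolve_route(), rdma_connect() and rdma_disconnect(). It is not part of cma.c: the rdma_cm_id is assumed to have been created and bound elsewhere with this handler attached, QP setup (e.g. via rdma_create_qp()) is omitted, and the timeout and rdma_conn_param values are placeholders.

/*
 * Illustrative only -- not part of cma.c.  A consumer-side event handler
 * that walks the id through CMA_ADDR_RESOLVED -> CMA_ROUTE_RESOLVED ->
 * CMA_CONNECT, mirroring the transitions handled above.
 */
#include <rdma/rdma_cm.h>

static int example_cm_event_handler(struct rdma_cm_id *id,
				    struct rdma_cm_event *event)
{
	struct rdma_conn_param conn_param = { 0 };
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* rdma_resolve_addr() completed; now resolve the route. */
		ret = rdma_resolve_route(id, 2000);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* Path record is ready; initiate the connection. */
		conn_param.responder_resources = 1;
		conn_param.initiator_depth = 1;
		conn_param.retry_count = 7;
		ret = rdma_connect(id, &conn_param);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Connection is up; the QP may now be used for transfers. */
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		rdma_disconnect(id);
		break;
	default:
		break;
	}

	/*
	 * A non-zero return tells the CMA to destroy this id, as seen in
	 * cma_work_handler() and the other handlers above.
	 */
	return ret;
}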
*/ 3397 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 3398 goto out; 3399 3400 memset(&event, 0, sizeof event); 3401 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 3402 ret = id_priv->id.event_handler(&id_priv->id, &event); 3403out: 3404 mutex_unlock(&id_priv->handler_mutex); 3405 return ret; 3406} 3407 3408static void cma_process_remove(struct cma_device *cma_dev) 3409{ 3410 struct rdma_id_private *id_priv; 3411 int ret; 3412 3413 mutex_lock(&lock); 3414 while (!list_empty(&cma_dev->id_list)) { 3415 id_priv = list_entry(cma_dev->id_list.next, 3416 struct rdma_id_private, list); 3417 3418 list_del(&id_priv->listen_list); 3419 list_del_init(&id_priv->list); 3420 atomic_inc(&id_priv->refcount); 3421 mutex_unlock(&lock); 3422 3423 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); 3424 cma_deref_id(id_priv); 3425 if (ret) 3426 rdma_destroy_id(&id_priv->id); 3427 3428 mutex_lock(&lock); 3429 } 3430 mutex_unlock(&lock); 3431 3432 cma_deref_dev(cma_dev); 3433 wait_for_completion(&cma_dev->comp); 3434} 3435 3436static void cma_remove_one(struct ib_device *device) 3437{ 3438 struct cma_device *cma_dev; 3439 3440 cma_dev = ib_get_client_data(device, &cma_client); 3441 if (!cma_dev) 3442 return; 3443 3444 mutex_lock(&lock); 3445 list_del(&cma_dev->list); 3446 mutex_unlock(&lock); 3447 3448 cma_process_remove(cma_dev); 3449 kfree(cma_dev); 3450} 3451 3452static int cma_init(void) 3453{ 3454 int ret; 3455#if defined(INET) 3456 int low, high, remaining; 3457 3458 get_random_bytes(&next_port, sizeof next_port); 3459 inet_get_local_port_range(&low, &high); 3460 remaining = (high - low) + 1; 3461 next_port = ((unsigned int) next_port % remaining) + low; 3462#endif 3463 3464 cma_wq = create_singlethread_workqueue("rdma_cm"); 3465 if (!cma_wq) 3466 return -ENOMEM; 3467 3468 ib_sa_register_client(&sa_client); 3469 rdma_addr_register_client(&addr_client); 3470 register_netdevice_notifier(&cma_nb); 3471 3472 ret = ib_register_client(&cma_client); 3473 if (ret) 3474 goto err; 3475 return 0; 3476 3477err: 3478 unregister_netdevice_notifier(&cma_nb); 3479 rdma_addr_unregister_client(&addr_client); 3480 ib_sa_unregister_client(&sa_client); 3481 destroy_workqueue(cma_wq); 3482 return ret; 3483} 3484 3485static void cma_cleanup(void) 3486{ 3487 ib_unregister_client(&cma_client); 3488 unregister_netdevice_notifier(&cma_nb); 3489 rdma_addr_unregister_client(&addr_client); 3490 ib_sa_unregister_client(&sa_client); 3491 destroy_workqueue(cma_wq); 3492 idr_destroy(&sdp_ps); 3493 idr_destroy(&tcp_ps); 3494 idr_destroy(&udp_ps); 3495 idr_destroy(&ipoib_ps); 3496} 3497 3498module_init(cma_init); 3499module_exit(cma_cleanup); 3500