/* cma.c revision 325940 */
/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 * Copyright (c) 2016 Chelsio Communications. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <netinet6/scope6_var.h>
#include <netinet6/ip6_var.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

static int tavor_quirk = 0;
module_param_named(tavor_quirk, tavor_quirk, int, 0644);
MODULE_PARM_DESC(tavor_quirk, "Tavor performance quirk: limit MTU to 1K if > 0");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define IBOE_PACKET_LIFETIME 18

static int cma_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
module_param_named(cma_response_timeout, cma_response_timeout, int, 0644);
MODULE_PARM_DESC(cma_response_timeout, "CMA_CM_RESPONSE_TIMEOUT default=20");

static int def_prec2sl = 3;
module_param_named(def_prec2sl, def_prec2sl, int, 0644);
MODULE_PARM_DESC(def_prec2sl, "Default value for SL priority with RoCE. Valid values 0 - 7");

Valid values 0 - 7"); 80 81static void cma_add_one(struct ib_device *device); 82static void cma_remove_one(struct ib_device *device); 83 84static struct ib_client cma_client = { 85 .name = "cma", 86 .add = cma_add_one, 87 .remove = cma_remove_one 88}; 89 90static struct ib_sa_client sa_client; 91static struct rdma_addr_client addr_client; 92static LIST_HEAD(dev_list); 93static LIST_HEAD(listen_any_list); 94static DEFINE_MUTEX(lock); 95static struct workqueue_struct *cma_wq; 96static DEFINE_IDR(sdp_ps); 97static DEFINE_IDR(tcp_ps); 98static DEFINE_IDR(udp_ps); 99static DEFINE_IDR(ipoib_ps); 100#if defined(INET) 101static int next_port; 102#endif 103 104struct cma_device { 105 struct list_head list; 106 struct ib_device *device; 107 struct completion comp; 108 atomic_t refcount; 109 struct list_head id_list; 110}; 111 112enum cma_state { 113 CMA_IDLE, 114 CMA_ADDR_QUERY, 115 CMA_ADDR_RESOLVED, 116 CMA_ROUTE_QUERY, 117 CMA_ROUTE_RESOLVED, 118 CMA_CONNECT, 119 CMA_DISCONNECT, 120 CMA_ADDR_BOUND, 121 CMA_LISTEN, 122 CMA_DEVICE_REMOVAL, 123 CMA_DESTROYING 124}; 125 126struct rdma_bind_list { 127 struct idr *ps; 128 struct hlist_head owners; 129 unsigned short port; 130}; 131 132/* 133 * Device removal can occur at anytime, so we need extra handling to 134 * serialize notifying the user of device removal with other callbacks. 135 * We do this by disabling removal notification while a callback is in process, 136 * and reporting it after the callback completes. 137 */ 138struct rdma_id_private { 139 struct rdma_cm_id id; 140 141 struct rdma_bind_list *bind_list; 142 struct socket *sock; 143 struct hlist_node node; 144 struct list_head list; /* listen_any_list or cma_device.list */ 145 struct list_head listen_list; /* per device listens */ 146 struct cma_device *cma_dev; 147 struct list_head mc_list; 148 149 int internal_id; 150 enum cma_state state; 151 spinlock_t lock; 152 struct mutex qp_mutex; 153 154 struct completion comp; 155 atomic_t refcount; 156 struct mutex handler_mutex; 157 158 int backlog; 159 int timeout_ms; 160 struct ib_sa_query *query; 161 int query_id; 162 union { 163 struct ib_cm_id *ib; 164 struct iw_cm_id *iw; 165 } cm_id; 166 167 u32 seq_num; 168 u32 qkey; 169 u32 qp_num; 170 u8 srq; 171 u8 tos; 172 int unify_ps_tcp; 173}; 174 175struct cma_multicast { 176 struct rdma_id_private *id_priv; 177 union { 178 struct ib_sa_multicast *ib; 179 } multicast; 180 struct list_head list; 181 void *context; 182 struct sockaddr_storage addr; 183 struct kref mcref; 184}; 185 186struct cma_work { 187 struct work_struct work; 188 struct rdma_id_private *id; 189 enum cma_state old_state; 190 enum cma_state new_state; 191 struct rdma_cm_event event; 192}; 193 194struct cma_ndev_work { 195 struct work_struct work; 196 struct rdma_id_private *id; 197 struct rdma_cm_event event; 198}; 199 200struct iboe_mcast_work { 201 struct work_struct work; 202 struct rdma_id_private *id; 203 struct cma_multicast *mc; 204}; 205 206union cma_ip_addr { 207 struct in6_addr ip6; 208 struct { 209 __be32 pad[3]; 210 __be32 addr; 211 } ip4; 212}; 213 214struct cma_hdr { 215 u8 cma_version; 216 u8 ip_version; /* IP version: 7:4 */ 217 __be16 port; 218 union cma_ip_addr src_addr; 219 union cma_ip_addr dst_addr; 220}; 221 222struct sdp_hh { 223 u8 bsdh[16]; 224 u8 sdp_version; /* Major version: 7:4 */ 225 u8 ip_version; /* IP version: 7:4 */ 226 u8 sdp_specific1[10]; 227 __be16 port; 228 __be16 sdp_specific2; 229 union cma_ip_addr src_addr; 230 union cma_ip_addr dst_addr; 231}; 232 233struct sdp_hah { 234 u8 bsdh[16]; 235 
struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey)
		return 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

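/*
 * find_gid_port() returns 0 when @gid is present in @port_num's GID
 * table, 1 when the port or GID query itself fails, and -EAGAIN when
 * the table holds no match.
 */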
static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
{
	int i;
	int err;
	struct ib_port_attr props;
	union ib_gid tmp;

	err = ib_query_port(device, port_num, &props);
	if (err)
		return 1;

	for (i = 0; i < props.gid_tbl_len; ++i) {
		err = ib_query_gid(device, port_num, i, &tmp);
		if (err)
			return 1;
		if (!memcmp(&tmp, gid, sizeof tmp))
			return 0;
	}

	return -EAGAIN;
}

int
rdma_find_cmid_laddr(struct sockaddr_in *local_addr, unsigned short dev_type,
		     void **cm_id)
{
	int ret;
	u8 port;
	int found_dev = 0, found_cmid = 0;
	struct rdma_id_private *id_priv;
	struct rdma_id_private *dev_id_priv;
	struct cma_device *cma_dev;
	struct rdma_dev_addr dev_addr;
	union ib_gid gid;
	enum rdma_link_layer dev_ll = dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;

	memset(&dev_addr, 0, sizeof(dev_addr));

	ret = rdma_translate_ip((struct sockaddr *)local_addr,
				&dev_addr);
	if (ret)
		goto err;

	/* find rdma device based on MAC address/gid */
	mutex_lock(&lock);

	memcpy(&gid, dev_addr.src_dev_addr +
	       rdma_addr_gid_offset(&dev_addr), sizeof(gid));

	list_for_each_entry(cma_dev, &dev_list, list)
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port)
			if ((rdma_port_get_link_layer(cma_dev->device, port) ==
			     dev_ll) &&
			    (rdma_node_get_transport(cma_dev->device->node_type) ==
			     RDMA_TRANSPORT_IWARP)) {
				ret = find_gid_port(cma_dev->device,
						    &gid, port);
				if (!ret) {
					found_dev = 1;
					goto out;
				} else if (ret == 1) {
					mutex_unlock(&lock);
					goto err;
				}
			}
out:
	mutex_unlock(&lock);

	if (!found_dev)
		goto err;

	/* Traverse the list of listening cm_ids to find the desired cm_id
	 * based on rdma device & port number.
	 */
	list_for_each_entry(id_priv, &listen_any_list, list)
		list_for_each_entry(dev_id_priv, &id_priv->listen_list,
				    listen_list)
			if (dev_id_priv->cma_dev == cma_dev)
				if (dev_id_priv->cm_id.iw->local_addr.sin_port
				    == local_addr->sin_port) {
					*cm_id = (void *)dev_id_priv->cm_id.iw;
					found_cmid = 1;
				}
	return found_cmid ? 0 : -ENODEV;

err:
	return -ENODEV;
}
EXPORT_SYMBOL(rdma_find_cmid_laddr);

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND) {
		iboe_addr_get_sgid(dev_addr, &gid);
		list_for_each_entry(cma_dev, &dev_list, list) {
			ret = ib_find_cached_gid(cma_dev->device, &gid,
						 &id_priv->id.port_num, NULL);
			if (!ret)
				goto out;
		}
	}

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret)
			break;
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum cma_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

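#if 0
/*
 * Illustrative sketch (not part of the original file or build): a
 * minimal create/destroy cycle against the API above.  The handler
 * name and port space choice are hypothetical.
 */
static int example_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	return 0;	/* a non-zero return asks the CMA to destroy the id */
}

static void example_create_destroy(void)
{
	struct rdma_cm_id *id;

	id = rdma_create_id(example_handler, NULL, RDMA_PS_TCP);
	if (!IS_ERR(id))
		rdma_destroy_id(id);
}
#endif
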
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

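/*
 * The three helpers above drive a connected QP through the usual
 * INIT -> RTR -> RTS sequence (or into the error state), with most of
 * the per-step attribute masks filled in by rdma_init_qp_attr().
 */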
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

int
rdma_cma_any_addr(struct sockaddr *addr)
{
	return cma_any_addr(addr);
}
EXPORT_SYMBOL(rdma_cma_any_addr);

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

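/*
 * cma_save_net_info() below fills in the new connection's addresses
 * from a received header: the header was written from the initiator's
 * point of view, so the accepting side's source address comes from the
 * header's destination field and vice versa.
 */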
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;
		ip4->sin_len = sizeof(struct sockaddr_in);

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		ip4->sin_len = sizeof(struct sockaddr_in);
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;
		ip6->sin6_len = sizeof(struct sockaddr_in6);
		ip6->sin6_scope_id = listen6->sin6_scope_id;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		ip6->sin6_len = sizeof(struct sockaddr_in6);
		ip6->sin6_scope_id = listen6->sin6_scope_id;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
		    && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	if (id_priv->sock != NULL && !id_priv->internal_id &&
	    !id_priv->unify_ps_tcp)
		sock_release(id_priv->sock);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

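/*
 * cma_rep_recv() completes the initiator-side handshake when a REP
 * arrives: it moves the QP through RTR to RTS and answers with an RTU,
 * or forces the QP into the error state and sends a consumer reject on
 * failure.
 */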
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, CMA_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
	} else {
		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
					&rt->addr.dev_addr);
		if (ret)
			goto destroy_id;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
					&id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

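/*
 * Both helpers above hand back an id that already carries the
 * listener's event handler and context and starts life in the
 * CMA_CONNECT state, ready to be wired to the incoming CM id.
 */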
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		/*
		 * Acquire mutex to prevent user executing rdma_destroy_id()
		 * while we're accessing the cm_id.
		 */
		mutex_lock(&lock);
		if (cma_comp(conn_id, CMA_CONNECT) &&
		    !cma_is_ud_ps(conn_id->id.ps))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		mutex_unlock(&lock);
		mutex_unlock(&conn_id->handler_mutex);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
	rdma_destroy_id(&conn_id->id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
#ifdef INET6
	struct in6_addr ip6_addr;
#endif

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
#ifdef INET6
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
#endif
	default:
		break;
	}
}

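/*
 * The data/mask pair built above lets ib_cm_listen() filter incoming
 * REQs so that only private data matching the bound IP version and
 * destination address is delivered to this listener.  The service ID
 * itself packs the port space above the TCP/UDP port: for example,
 * assuming RDMA_PS_TCP's conventional value 0x0106 and port 1234,
 * cma_get_service_id() yields cpu_to_be64(0x010604d2).
 */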
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch ((int)iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(NULL, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    id_priv->sock,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	dev_id_priv->sock = id_priv->sock;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

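#if 0
/*
 * Illustrative sketch (not part of the original file or build): a
 * passive-side caller binds to a wildcard IPv4 address and listens via
 * rdma_listen() below.  The port and backlog values are hypothetical.
 */
static int example_listen(struct rdma_cm_id *id)
{
	struct sockaddr_in sin;
	int ret;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(1234);

	ret = rdma_bind_addr(id, (struct sockaddr *)&sin);
	if (ret)
		return ret;
	return rdma_listen(id, 10);
}
#endif
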
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

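/*
 * cma_query_ib_route() below issues the SA path record query keyed on
 * the resolved SGID/DGID pair, P_Key and service ID.  For IPv4 the TOS
 * maps to the path's QoS class; for IPv6 the traffic class is taken
 * from the flow label field instead.
 */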
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	if (tavor_quirk) {
		path_rec.mtu_selector = IB_SA_LT;
		path_rec.mtu = IB_MTU_2048;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

static u8 tos_to_sl(u8 tos)
{
	return def_prec2sl & 7;
}

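/*
 * Note that tos_to_sl() ignores its argument: the SL is always taken
 * from the def_prec2sl module parameter, masked to the valid 0-7 range.
 */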
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
	struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
	struct net_device *ndev = NULL;
	u16 vid;

	if (src_addr->sin_family != dst_addr->sin_family)
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if)
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	vid = rdma_vlan_dev_vlan_id(ndev);

	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);

	route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = tos_to_sl(id_priv->tos);

#ifdef __linux__
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
#else
	route->path_rec->mtu = iboe_get_mtu(ndev->if_mtu);
#endif
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

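/*
 * Typical active-side sequence once an id exists (illustrative, not
 * part of the original file): rdma_resolve_addr() is called first; when
 * the handler sees RDMA_CM_EVENT_ADDR_RESOLVED it calls
 * rdma_resolve_route() above, and RDMA_CM_EVENT_ROUTE_RESOLVED then
 * signals that path data is ready for rdma_connect().
 */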
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr *src, *dst;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	if (cma_zero_addr(src)) {
		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
		if ((src->sa_family = dst->sa_family) == AF_INET) {
			((struct sockaddr_in *) src)->sin_addr.s_addr =
				((struct sockaddr_in *) dst)->sin_addr.s_addr;
		} else {
			ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
				       &((struct sockaddr_in6 *) dst)->sin6_addr);
		}
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
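/*
 * cma_bind_addr() below fills in a usable source address when the caller
 * passed none: the family is copied from the destination and, for
 * link-local IPv6 destinations, the scope id pins the binding to one
 * interface. A hypothetical caller-side equivalent (sketch):
 *
 *	struct sockaddr_in6 src = { .sin6_family = AF_INET6 };
 *	src.sin6_scope_id = dst.sin6_scope_id;	// required for fe80::/10
 *	rdma_resolve_addr(id, (struct sockaddr *)&src,
 *			  (struct sockaddr *)&dst, 2000);
 */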
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
#ifdef INET6
		if (dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (IN6_IS_SCOPE_LINKLOCAL(&dst_addr6->sin6_addr) ||
			    IN6_IS_ADDR_MC_INTFACELOCAL(&dst_addr6->sin6_addr))
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		}
#endif
	}
	if (!cma_any_addr(src_addr))
		return rdma_bind_addr(id, src_addr);
	else {
#if defined(INET6) || defined(INET)
		union {
#ifdef INET
			struct sockaddr_in in;
#endif
#ifdef INET6
			struct sockaddr_in6 in6;
#endif
		} addr;
#endif

		switch (dst_addr->sa_family) {
#ifdef INET
		case AF_INET:
			memset(&addr.in, 0, sizeof(addr.in));
			addr.in.sin_family = dst_addr->sa_family;
			addr.in.sin_len = sizeof(addr.in);
			return rdma_bind_addr(id, (struct sockaddr *)&addr.in);
#endif
#ifdef INET6
		case AF_INET6:
			memset(&addr.in6, 0, sizeof(addr.in6));
			addr.in6.sin6_family = dst_addr->sa_family;
			addr.in6.sin6_len = sizeof(addr.in6);
			addr.in6.sin6_scope_id =
			    ((struct sockaddr_in6 *)dst_addr)->sin6_scope_id;
			return rdma_bind_addr(id, (struct sockaddr *)&addr.in6);
#endif
		default:
			return -EINVAL;
		}
	}
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
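/*
 * Port reservations below use the old two-step idr API: idr_pre_get()
 * preloads memory, and idr_get_new_above() hands out the next free id at
 * or above the requested value, returning -EAGAIN if the preload raced.
 * The retry idiom used throughout this file:
 *
 *	do {
 *		ret = idr_get_new_above(ps, bind_list, snum, &port);
 *	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
 *
 * On success "port" may still differ from "snum" if that id was already
 * taken, which cma_alloc_port() treats as -EADDRNOTAVAIL.
 */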
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
#if defined(INET)
	struct rdma_bind_list *bind_list;
	int port, ret, low, high;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	/* FIXME: add proper port randomization, as in inet_csk_get_port() */
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	inet_get_local_port_range(&low, &high);
	if (port > high) {
		if (next_port != low) {
			idr_remove(ps, port);
			next_port = low;
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == high)
		next_port = low;
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
#else
	return -ENOSPC;
#endif
}
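/*
 * A bound id either carries an explicit port (the cma_use_port() path
 * below) or lets the ephemeral allocator above pick one. Caller-side
 * sketch, with "laddr" assumed to be a local address owned by an
 * RDMA-capable interface:
 *
 *	laddr.sin_port = htons(0);	// 0 selects an ephemeral port
 *	rdma_bind_addr(id, (struct sockaddr *)&laddr);
 *	// sin_port now holds the port chosen from the local port range
 */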
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
#ifdef __linux__
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;
#endif

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_tcp_port(struct rdma_id_private *id_priv)
{
	int ret;
	int size;
	struct socket *sock;

	ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ret;
#ifdef __linux__
	ret = sock->ops->bind(sock,
			      (struct sockaddr *) &id_priv->id.route.addr.src_addr,
			      ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
#else
	SOCK_LOCK(sock);
	sock->so_options |= SO_REUSEADDR;
	SOCK_UNLOCK(sock);

	ret = -sobind(sock,
		      (struct sockaddr *)&id_priv->id.route.addr.src_addr,
		      curthread);
#endif
	if (ret) {
		sock_release(sock);
		return ret;
	}

	size = ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr);
	ret = sock_getname(sock,
			   (struct sockaddr *) &id_priv->id.route.addr.src_addr,
			   &size, 0);
	if (ret) {
		sock_release(sock);
		return ret;
	}

	id_priv->sock = sock;
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;

		mutex_lock(&lock);
		/* check if there are any iWARP IB devices present */
		list_for_each_entry(cma_dev, &dev_list, list) {
			if (rdma_protocol_iwarp(cma_dev->device, 1)) {
				id_priv->unify_ps_tcp = 1;
				break;
			}
		}
		mutex_unlock(&lock);

		if (id_priv->unify_ps_tcp) {
			ret = cma_get_tcp_port(id_priv);
			if (ret)
				goto out;
		}
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);
out:
	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#ifdef INET6
	struct sockaddr_in6 sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = *(struct sockaddr_in6 *)addr;

	if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr) ||
	    IN6_IS_ADDR_MC_INTFACELOCAL(&sin6.sin6_addr)) {
		/* check if IPv6 scope ID is set */
		if (sa6_recoverscope(&sin6) || sin6.sin6_scope_id == 0)
			return -EINVAL;
		dev_addr->bound_dev_if = sin6.sin6_scope_id;
	}
#endif
	return (0);
}
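/*
 * rdma_bind_addr() below is the passive-side entry point. Listener
 * sketch (the handler and backlog value are caller-supplied
 * assumptions):
 *
 *	id = rdma_create_id(my_cm_handler, ctx, RDMA_PS_TCP);
 *	rdma_bind_addr(id, (struct sockaddr *)&laddr);
 *	rdma_listen(id, 10);
 *	// connect requests arrive as RDMA_CM_EVENT_CONNECT_REQUEST events
 */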
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	if (route->addr.src_addr.ss_family == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) &route->addr.src_addr;
		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 4);
			sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			sdp_hdr->port = src4->sin_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 4);
			cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			cma_hdr->port = src4->sin_port;
			break;
		}
	} else {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 6);
			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
			sdp_hdr->port = src6->sin6_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 6);
			cma_hdr->src_addr.ip6 = src6->sin6_addr;
			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
			cma_hdr->port = src6->sin6_port;
			break;
		}
	}
	return 0;
}
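/*
 * The CM private data always begins with the header formatted above; the
 * ip_version byte packs the IP version into bits 7:4 (0x40 for IPv4,
 * 0x60 for IPv6), matching the cma_hdr/sdp_hh definitions earlier in
 * this file. Layout sketch for the non-SDP case:
 *
 *	byte 0		cma_version (CMA_VERSION)
 *	byte 1		ip_version (4 or 6, in the high nibble)
 *	bytes 2-3	source port (network order)
 *	bytes 4-19	source IP (IPv4 in the last 4 bytes)
 *	bytes 20-35	destination IP
 *
 * Consumer private data supplied by the caller follows immediately after.
 */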
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = -EINVAL;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.timeout_ms = 1 << (cma_response_timeout - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
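/*
 * cma_connect_ib() below drives a full RC connection through the IB CM
 * REQ/REP/RTU exchange, while unreliable-datagram port spaces use the
 * SIDR request above: the passive side only reports its QPN and Q_Key,
 * and no connection state is established on the wire. rdma_connect()
 * picks one of the two based on cma_is_ud_ps(id->ps).
 */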
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = cma_response_timeout;
	req.local_cm_response_timeout = cma_response_timeout;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, id_priv->sock,
				cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
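/*
 * Caller-side sketch of the active connect step (the parameter values
 * are assumptions tuned per application; the QP is presumed to have been
 * created on this id with rdma_create_qp() beforehand):
 *
 *	struct rdma_conn_param param = {
 *		.responder_resources = 1,
 *		.initiator_depth = 1,
 *		.retry_count = 7,
 *		.rnr_retry_count = 7,
 *	};
 *	rdma_connect(id, &param);
 *	// success is reported as RDMA_CM_EVENT_ESTABLISHED
 */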
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param ? conn_param->private_data : NULL,
						conn_param ? conn_param->private_data_len : 0);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
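/*
 * Passive-side sketch for an RDMA_CM_EVENT_CONNECT_REQUEST handler (the
 * child id arrives as the handler's id argument; QP creation and the
 * parameter choices are application assumptions):
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *ev)
 *	{
 *		if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 *			struct rdma_conn_param param = {
 *				.responder_resources = 1,
 *				.initiator_depth = 1,
 *			};
 *			// create a QP on "id", then either:
 *			rdma_accept(id, &param);
 *			// or: rdma_reject(id, NULL, 0);
 *		}
 *		return 0;
 *	}
 */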
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
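/*
 * cma_set_mgid() below derives the multicast GID for the SA join from
 * the destination IP. For IPv4 it reuses the IPoIB mapping via
 * ip_ib_mc_map() (an ff12:401b:<pkey>-prefixed GID carrying the low 28
 * address bits in the tail); for RDMA_PS_UDP the fourth GID byte is then
 * overwritten with the RDMA CM signature 0x01 so these joins cannot
 * collide with plain IPoIB groups for the same address. IPv6 addresses
 * that already look like SA-assigned MGIDs (the 0xff10/0xa01b prefix
 * check) are copied verbatim.
 */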
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
#if defined(INET) || defined(INET6)
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
#endif
#ifdef INET
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
#endif

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
#ifdef INET6
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
#endif
#ifdef INET
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
#endif
	}
}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6)
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
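/*
 * On IBoE/RoCE ports there is no subnet administrator to join through,
 * so cma_iboe_join_multicast() below fabricates the mcmember record
 * locally (MGID from the IP as above, P_Key 0xffff, rate and MTU taken
 * from the bound netdev) and completes the join by bouncing
 * cma_ib_mc_handler(0, ...) through cma_wq, making the completion path
 * look identical to the IB/SA case.
 */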
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}

	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
#ifdef __linux__
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
#else
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->if_mtu);
#endif
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}

	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
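/*
 * Caller-side sketch (UD port space; "grp" is an assumed multicast
 * sockaddr and "ctx" comes back in event.param.ud.private_data):
 *
 *	rdma_join_multicast(id, (struct sockaddr *)&grp, ctx);
 *	// wait for RDMA_CM_EVENT_MULTICAST_JOIN, which carries the AH
 *	// attributes, QPN 0xFFFFFF and Q_Key needed to address the group
 *	...
 *	rdma_leave_multicast(id, (struct sockaddr *)&grp);
 */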
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->if_index) &&
	    memcmp(dev_addr->src_dev_addr, IF_LLADDR(ndev), ndev->if_addrlen)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->if_xname, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

#ifdef __linux__
	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;
#else
	if (event != NETDEV_DOWN && event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
#endif

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

static int cma_init(void)
{
	int ret;
#if defined(INET)
	int low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;
#endif

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);