/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/string.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
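
/*
 * Illustrative sketch (not part of the original source): the
 * pre-allocation policy above, expressed as the calls made by
 * iw_cm_listen(), iw_cm_connect() and cm_conn_req_handler() below.
 * The cm_id_priv variable names here are hypothetical.
 */
#if 0
	alloc_work_entries(listen_id_priv, backlog);	/* LISTENING IDS */
	alloc_work_entries(active_id_priv, 4);		/* ACTIVE IDS */
	alloc_work_entries(passive_id_priv, 3);		/* PASSIVE IDS */
#endif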

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
		kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}
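
/*
 * Note (added for clarity, not in the original source): once the free
 * list has been filled by alloc_work_entries() with GFP_KERNEL at
 * connect/listen time, the event path only moves elements between the
 * free list and the work list under cm_id_priv->lock.  The only
 * allocation left in the event path is the GFP_ATOMIC copy of private
 * data in copy_private_data() below.
 */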

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		complete(&cm_id_priv->destroy_comp);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	if (iwcm_deref_id(cm_id_priv) &&
	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
	}
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 struct socket *so,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	cm_id_priv->id.so = so;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
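
/*
 * Illustrative sketch (not part of the original source): how a consumer
 * might create a listening endpoint.  'my_handler', 'dev', 'so' and
 * 'ctx' are hypothetical; error handling is elided.
 */
#if 0
static int my_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	/* dispatch on event->event; a nonzero return destroys the cm_id */
	return 0;
}

	/* ...in the consumer's setup path: */
	cm_id = iw_create_cm_id(dev, so, my_handler, ctx);
	if (!IS_ERR(cm_id))
		ret = iw_cm_listen(cm_id, 8);	/* backlog of 8 */
#endif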

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
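
/*
 * Illustrative sketch (not part of the original source): the abrupt
 * flag selects which QP transition iw_cm_disconnect() above performs.
 */
#if 0
	iw_cm_disconnect(cm_id, 0);	/* graceful: QP -> SQD (RDMAC CLOSING) */
	iw_cm_disconnect(cm_id, 1);	/* abrupt:   QP -> ERR */
#endif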

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		ret = cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned nonzero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

	destroy_cm_id(cm_id);

	wait_for_completion(&cm_id_priv->destroy_comp);

	free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
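
/*
 * Note (added for clarity, not in the original source): an event
 * handler must not call iw_destroy_cm_id() directly.  It requests
 * destruction by returning nonzero, which makes cm_work_handler()
 * below set IWCM_F_CALLBACK_DESTROY and free the cm_id once the last
 * reference is dropped.
 */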

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
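
/*
 * Illustrative sketch (not part of the original source): the passive
 * side typically accepts or rejects from its CONNECT_REQUEST handling.
 * 'my_qp', 'want_connection' and the ord/ird values are hypothetical.
 */
#if 0
	/* in the client's CONNECT_REQUEST handling: */
	struct iw_cm_conn_param iw_param = {
		.qpn = my_qp->qp_num,
		.ord = 1,
		.ird = 1,
	};

	if (want_connection)
		ret = iw_cm_accept(cm_id, &iw_param);
	else
		ret = iw_cm_reject(cm_id, NULL, 0);
#endif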

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (ret) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
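
/*
 * Illustrative sketch (not part of the original source): an active-side
 * connect.  The cm_id's addresses are assumed to have been filled in by
 * the caller; 'my_qp' and the ord/ird values are hypothetical.
 */
#if 0
	struct iw_cm_conn_param iw_param = {
		.qpn = my_qp->qp_num,
		.ord = 1,
		.ird = 1,
	};

	ret = iw_cm_connect(cm_id, &iw_param);
	/* the outcome is reported later via a CONNECT_REPLY event */
#endif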

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				iw_event->so,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->local_addr = iw_event->local_addr;
	cm_id->remote_addr = iw_event->remote_addr;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		destroy_cm_id(cm_id);
		if (atomic_read(&cm_id_priv->refcount) == 0)
			free_cm_id(cm_id_priv);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
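
/*
 * Summary (added for clarity, not in the original source) of the
 * dispatch performed by process_event() below:
 *
 *	IW_CM_EVENT_CONNECT_REQUEST -> cm_conn_req_handler()  (listening id)
 *	IW_CM_EVENT_CONNECT_REPLY   -> cm_conn_rep_handler()  (active side)
 *	IW_CM_EVENT_ESTABLISHED     -> cm_conn_est_handler()  (passive side)
 *	IW_CM_EVENT_DISCONNECT      -> cm_disconnect_handler()
 *	IW_CM_EVENT_CLOSE           -> cm_close_handler()
 */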

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;
	int destroy_id;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		ret = process_event(cm_id_priv, &levent);
		if (ret) {
			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
			destroy_cm_id(&cm_id_priv->id);
		}
		BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		if (iwcm_deref_id(cm_id_priv)) {
			if (destroy_id) {
				BUG_ON(!list_empty(&cm_id_priv->work_list));
				free_cm_id(cm_id_priv);
			}
			return;
		}
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
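
/*
 * Illustrative sketch (not part of the original source): how a provider
 * driver delivers an event.  It invokes the event_handler installed by
 * iw_create_cm_id(), i.e. cm_event_handler() below, typically from
 * interrupt context.  A -ENOMEM return means no pre-allocated work
 * element was available and the event was dropped.
 */
#if 0
	struct iw_cm_event event = {
		.event  = IW_CM_EVENT_ESTABLISHED,
		.status = 0,
	};

	ret = cm_id->event_handler(cm_id, &event);
#endif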

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
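
/*
 * Illustrative sketch (not part of the original source): a consumer
 * fills in the target qp_state, asks the CM for the attributes it
 * requires, then applies them with ib_modify_qp().  'qp' is a
 * hypothetical QP owned by the caller.
 */
#if 0
	struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_INIT };
	int qp_attr_mask;

	ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (!ret)
		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
#endif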

static int __init iw_cm_init(void)
{
	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
	if (!iwcm_wq)
		return -ENOMEM;

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);