iwcm.c revision 237263
/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
        struct work_struct work;
        struct iwcm_id_private *cm_id;
        struct list_head list;
        struct iw_cm_event event;
        struct list_head free_list;
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's
 *    up to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}

static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low-level driver doesn't have to.  Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id.  If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                complete(&cm_id_priv->destroy_comp);
                return 1;
        }

        return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        if (iwcm_deref_id(cm_id_priv) &&
            test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
        }
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 struct socket *so,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        cm_id_priv->id.so = so;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
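
/*
 * Illustrative usage sketch (not part of this file's API surface):
 * a kernel client creates a cm_id, drives it from its own handler, and
 * destroys it when done.  "my_cm_handler", "ctx" and "so" are
 * hypothetical client names.
 *
 *      static int my_cm_handler(struct iw_cm_id *id,
 *                               struct iw_cm_event *event)
 *      {
 *              // dispatch on event->event; a non-zero return asks the
 *              // IWCM to destroy the cm_id (see cm_work_handler below)
 *              return 0;
 *      }
 *
 *      cm_id = iw_create_cm_id(device, so, my_cm_handler, ctx);
 *      if (IS_ERR(cm_id))
 *              return PTR_ERR(cm_id);
 *      ...
 *      iw_destroy_cm_id(cm_id);        // drops the initial reference
 */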

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state.  It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed.
 * Then process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;

                /* The QP could be NULL for a user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * the connect_request event was delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);

                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
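
/*
 * Sketch of the abrupt flag semantics implemented above (illustrative):
 * an orderly shutdown moves the QP to SQD so the send queue can drain
 * before the LLP close, while an abrupt teardown forces the QP to ERROR.
 *
 *      iw_cm_disconnect(cm_id, 0);     // graceful: QP -> IB_QPS_SQD
 *      iw_cm_disconnect(cm_id, 1);     // abrupt:   QP -> IB_QPS_ERR
 */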

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall.  A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                ret = cm_id->device->iwcm->destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving the connection request event notification, or
                 * returned non-zero from the event callback function.
                 * In either case, we must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_id->device->iwcm->reject(cm_id, NULL, 0);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        (void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread.  The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

        destroy_cm_id(cm_id);

        wait_for_completion(&cm_id_priv->destroy_comp);

        free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests.  Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
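
/*
 * Passive-side usage sketch (illustrative; "listen_handler" and
 * "my_sockaddr" are hypothetical).  Note that the backlog passed here
 * also sizes the pre-allocated work element pool (alloc_work_entries
 * above):
 *
 *      cm_id = iw_create_cm_id(device, so, listen_handler, ctx);
 *      cm_id->local_addr = my_sockaddr;
 *      ret = iw_cm_listen(cm_id, 8);   // backlog of 8 pending requests
 *      // Each inbound request is delivered as a CONNECT_REQUEST event
 *      // on a cloned child cm_id (see cm_conn_req_handler below); the
 *      // handler answers with iw_cm_accept() or iw_cm_reject().
 */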

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request.  No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->reject(cm_id, private_data,
                                          private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event.  Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event.  iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->connect(cm_id, iw_param);
        if (ret) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
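
/*
 * Active-side usage sketch (illustrative; "pdata", "plen" and "my_qp"
 * are hypothetical).  The qpn ties the cm_id to the provider QP looked
 * up via get_qp() above:
 *
 *      struct iw_cm_conn_param param = {
 *              .private_data           = pdata,
 *              .private_data_len       = plen,
 *              .ord                    = 1,
 *              .ird                    = 1,
 *              .qpn                    = my_qp->qp_num,
 *      };
 *      ret = iw_cm_connect(cm_id, &param);
 *      // The outcome arrives asynchronously as a CONNECT_REPLY event;
 *      // until it is processed, iw_cm_disconnect() and
 *      // iw_destroy_cm_id() block on connect_wait.
 */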

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request.  The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event.  The
 * listen_cm_id contains the client cm_handler, context and
 * device.  These are copied when the device is cloned.  The event
 * contains the new four-tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        /*
         * We could be destroying the listening id.  If so, ignore this
         * upcall.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                iw_event->so,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->local_addr = iw_event->local_addr;
        cm_id->remote_addr = iw_event->remote_addr;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                destroy_cm_id(cm_id);
                if (atomic_read(&cm_id_priv->refcount) == 0)
                        free_cm_id(cm_id_priv);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}
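
/*
 * For reference, the typical passive-side event sequence seen by the
 * handlers below (an illustrative sketch; the DISCONNECT may be absent
 * on an abortive close):
 *
 *      CONNECT_REQUEST -> [iw_cm_accept] -> ESTABLISHED
 *                      -> DISCONNECT -> CLOSE
 */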

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state.  If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event.  If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect.  Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event
 * before posting its requests to the server.  This event will wake up
 * anyone blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
                cm_id_priv->id.local_addr = iw_event->local_addr;
                cm_id_priv->id.remote_addr = iw_event->remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state.  Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }

        return ret;
}

/*
 * Process events on the work_list for the cm_id.  If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed.  This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;
        int destroy_id;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                ret = process_event(cm_id_priv, &levent);
                if (ret) {
                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
                destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                if (iwcm_deref_id(cm_id_priv)) {
                        if (destroy_id) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
                        return;
                }
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
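
/*
 * Note on ordering (descriptive): cm_event_handler() below queues the
 * work_struct only when the work_list was empty, so at most one
 * cm_work_handler() instance drains a given cm_id's list at a time.
 * Combined with the single-threaded iwcm_wq, events for a cm_id are
 * processed in the order the provider posted them.
 */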

/*
 * This function is called in interrupt context.  Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block.  Events are queued to a per-CM_ID
 * work_list.  If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id.  Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *            0 - the event was handled.
 *      -ENOMEM - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
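
/*
 * Typical transition-helper usage (illustrative sketch): before posting
 * work, an application moves its QP through INIT/RTR/RTS with the
 * attribute mask filled in here.  "qp" is a hypothetical provider QP.
 *
 *      struct ib_qp_attr attr;
 *      int mask;
 *
 *      attr.qp_state = IB_QPS_INIT;
 *      if (!iw_cm_init_qp_attr(cm_id, &attr, &mask))
 *              ib_modify_qp(qp, &attr, mask);
 */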

static int __init iw_cm_init(void)
{
        iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
        if (!iwcm_wq)
                return -ENOMEM;

        return 0;
}

static void __exit iw_cm_cleanup(void)
{
        destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);