/* ipoib_cm.c, revision 337096 */
/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_CM

#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <linux/delay.h>

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{

	ipoib_dma_unmap_rx(priv, (struct ipoib_rx_buf *)rx_req);
}

static int ipoib_cm_post_receive_srq(struct ipoib_dev_priv *priv, int id)
{
	struct ib_recv_wr *bad_wr;
	struct ipoib_rx_buf *rx_req;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&priv->cm.srq_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->cm.rx_sge[i].addr = rx_req->mapping[i];
		priv->cm.rx_sge[i].length = m->m_len;
	}

	priv->cm.rx_wr.num_sge = i;
	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

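	/*
	 * The ring index is folded into wr_id together with the
	 * IPOIB_OP_CM and IPOIB_OP_RECV flag bits, so the completion
	 * handler (ipoib_cm_handle_rx_wc) can recover the ring slot
	 * from the work completion.
	 */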
	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(priv->cm.srq_ring[id].mb);
		priv->cm.srq_ring[id].mb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct ipoib_dev_priv *priv,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_rx_buf *rx_req;
	struct ib_recv_wr *bad_wr;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&rx->rx_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		sge[i].addr = rx_req->mapping[i];
		sge[i].length = m->m_len;
	}

	wr->num_sge = i;
	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(rx->rx_ring[id].mb);
		rx->rx_ring[id].mb = NULL;
	}

	return ret;
}

static struct mbuf *
ipoib_cm_alloc_rx_mb(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
	return ipoib_alloc_map_mb(priv, (struct ipoib_rx_buf *)rx_req,
	    priv->cm.max_cm_mtu);
}

static void ipoib_cm_free_rx_ring(struct ipoib_dev_priv *priv,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].mb) {
			ipoib_cm_dma_unmap_rx(priv, &rx_ring[i]);
			m_freem(rx_ring[i].mb);
		}

	kfree(rx_ring);
}

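/*
 * RX QPs being torn down sit on the flush list, and a single "drain"
 * send WR is posted on the first of them.  The QP is already in the
 * error state, so the WR completes immediately with a flush error on
 * the receive CQ, ordered behind any receive completions still queued
 * for those QPs; when ipoib_cm_handle_rx_wc() sees
 * IPOIB_CM_RX_DRAIN_WRID it knows the drain list is safe to reap.
 */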
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = p->priv;
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_rx *p)
{
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(priv)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = priv->cm.num_frags;
	}

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct ipoib_dev_priv *priv,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct ipoib_dev_priv *priv,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	int i;

	for (i = 0; i < IPOIB_CM_RX_SG; i++)
		sge[i].lkey = priv->pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sge;
	wr->num_sge = 1;
}

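/*
 * Without an SRQ, each connected-mode RX QP needs a private receive
 * ring.  The number of such QPs is bounded by the max_nonsrq_conn_qp
 * module parameter; priv->cm.nonsrq_conn_qp counts the live ones.
 */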
static int ipoib_cm_nonsrq_init_rx(struct ipoib_dev_priv *priv,
				   struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx)
{
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = kzalloc(ipoib_recvq_size * sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(priv, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_mb(priv, &rx->rx_ring[i])) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(priv, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(priv, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct ipoib_dev_priv *priv, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(priv);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

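/*
 * Passive side: a peer sent us a CM REQ.  Create the RC QP, bring it
 * up through INIT/RTR/RTS, set up per-connection receive buffers when
 * no SRQ is available, and answer with a REP carrying our UD QPN and
 * maximum connected-mode MTU in the private data.
 */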
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_dev_priv *priv = cm_id->context;
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->priv = priv;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(priv, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(priv)) {
		ret = ipoib_cm_nonsrq_init_rx(priv, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(priv, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = p->priv;
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

void ipoib_cm_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_rx_buf saverx;
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct ifnet *dev = priv->dev;
	struct mbuf *mb, *newmb;
	struct ipoib_cm_rx *p;
	int has_srq;
	u_short proto;

	CURVNET_SET_QUIET(dev->if_vnet);

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

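	/*
	 * A wr_id beyond the ring size is not a data completion: it is
	 * either the drain WR flushing out, meaning every receive
	 * queued ahead of it has already completed and the drained QPs
	 * can be reaped, or a stray completion worth warning about.
	 */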
	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock(&priv->lock);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			if (priv->cm.id != NULL)
				queue_work(ipoib_workqueue,
					   &priv->cm.rx_reap_task);
			spin_unlock(&priv->lock);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		goto done;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(priv);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	mb = rx_ring[wr_id].mb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock(&priv->lock);
				list_move(&p->list, &priv->cm.rx_reap_list);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
				spin_unlock(&priv->lock);
			}
			goto done;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
		}
	}

	memcpy(&saverx, &rx_ring[wr_id], sizeof(saverx));
	newmb = ipoib_cm_alloc_rx_mb(priv, &rx_ring[wr_id]);
	if (unlikely(!newmb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
		memcpy(&rx_ring[wr_id], &saverx, sizeof(saverx));
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, &saverx);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_dma_mb(priv, mb, wc->byte_len);

	if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);

	mb->m_pkthdr.rcvif = dev;
	proto = *mtod(mb, uint16_t *);
	m_adj(mb, IPOIB_ENCAP_LEN);

	IPOIB_MTAP_PROTO(dev, mb, proto);
	ipoib_demux(dev, mb, ntohs(proto));

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(priv, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(priv, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
done:
	CURVNET_RESTORE();
	return;
}

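/*
 * Queue a connected-mode send: one scatter/gather entry is built per
 * mbuf in the chain, reusing the DMA addresses recorded in the TX
 * descriptor by ipoib_dma_map_tx().
 */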
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    struct ipoib_cm_tx_buf *tx_req,
			    unsigned int wr_id)
{
	struct ib_send_wr *bad_wr;
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->tx_sge[i].addr = mapping[i];
		priv->tx_sge[i].length = m->m_len;
	}
	priv->tx_wr.wr.num_sge = i;
	priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
	priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
}

void ipoib_cm_send(struct ipoib_dev_priv *priv, struct mbuf *mb, struct ipoib_cm_tx *tx)
{
	struct ipoib_cm_tx_buf *tx_req;
	struct ifnet *dev = priv->dev;

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (ipoib_poll_tx(priv))
			; /* nothing */

	m_adj(mb, sizeof(struct ipoib_pseudoheader));
	if (unlikely(mb->m_pkthdr.len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   mb->m_pkthdr.len, tx->mtu);
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		ipoib_cm_mb_too_long(priv, mb, IPOIB_CM_MTU(tx->mtu));
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, mb->m_pkthdr.len, tx->qp->qp_num);

	/*
	 * We put the mb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->mb = mb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,
	    priv->cm.num_frags))) {
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		if (tx_req->mb)
			m_freem(tx_req->mb);
		return;
	}

	if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {
		ipoib_warn(priv, "post_send failed\n");
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(mb);
	} else {
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
				ipoib_warn(priv, "request notify on send CQ failed\n");
			dev->if_drv_flags |= IFF_DRV_OACTIVE;
		}
	}
}

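/*
 * Send completion: release the DMA mapping and the mbuf, advance the
 * ring tail, reopen the interface queue once the ring drains to half
 * full, and tear the connection down on any hard (non-flush) error.
 */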
void ipoib_cm_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);

	m_freem(tx_req->mb);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_path *path;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		path = tx->path;

		if (path) {
			path->cm = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
	}
}

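/*
 * Listen for incoming connected-mode requests.  The CM service ID is
 * the IPoIB-CM IETF prefix ORed with our local UD QP number, which is
 * what peers extract from this interface's link-layer address (see
 * IPOIB_QPN() in ipoib_cm_tx_start()).
 */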
int ipoib_cm_dev_open(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, priv);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), 0);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(priv)) {
			ipoib_cm_free_rx_ring(priv, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

void ipoib_cm_dev_stop(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	cancel_work_sync(&priv->cm.rx_reap_task);

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(priv);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(priv);

	cancel_delayed_work_sync(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = p->priv;
	struct ipoib_cm_data *data = event->private_data;
	struct ifqueue mbqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct mbuf *mb;

	ipoib_dbg(priv, "cm rep handler\n");
	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	bzero(&mbqueue, sizeof(mbqueue));

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->path)
		for (;;) {
			_IF_DEQUEUE(&p->path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}
	spin_unlock_irq(&priv->lock);

	for (;;) {
		struct ifnet *dev = p->priv->dev;
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "if_transmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_tx *tx)
{
	struct ib_qp_init_attr attr = {
		.send_cq = priv->send_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = priv->cm.num_frags,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}

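/*
 * Active side: send the CM REQ.  As on the passive side, the private
 * data advertises our UD QPN and maximum connected-mode MTU, mirroring
 * what the peer returns in its REP.
 */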
static int ipoib_cm_send_req(struct ipoib_dev_priv *priv,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	ipoib_dbg(priv, "cm send req\n");

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = ipoib_cm_has_srq(priv);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct ipoib_dev_priv *priv,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = p->priv;
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->priv, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to init: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->priv, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	kfree(p->tx_ring);
err_tx:
	return ret;
}

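/*
 * Tear down an active connection: give pending sends up to five
 * seconds to complete, then reclaim whatever is still outstanding so
 * mbufs and DMA mappings are not leaked.
 */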
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = p->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->path)
		ipoib_path_free(priv, p->path);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(tx_req->mb);
		++p->tx_tail;
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	kfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = tx->priv;
	struct ipoib_path *path;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irqsave(&priv->lock, flags);
		path = tx->path;

		if (path) {
			path->cm = NULL;
			tx->path = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		if (path)
			ipoib_path_free(tx->priv, path);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct ipoib_dev_priv *priv,
				       struct ipoib_path *path)
{
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	ipoib_dbg(priv, "Creating cm tx\n");
	path->cm = tx;
	tx->path = path;
	tx->priv = priv;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = tx->priv;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock(&priv->lock);
		list_move(&tx->list, &priv->cm.reap_list);
		spin_unlock(&priv->lock);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->path->pathrec.dgid.raw);
		tx->path = NULL;
	}
}

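/*
 * Work handler that initiates connections queued on cm.start_list.
 * priv->lock is dropped around ipoib_cm_tx_init() because QP and CM ID
 * creation can sleep, so the path record and QPN are copied out first.
 */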
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct ipoib_path *path;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	ipoib_dbg(priv, "cm start task\n");
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		path = p->path;
		qpn = IPOIB_QPN(path->hwaddr);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			path = p->path;
			if (path) {
				path->cm = NULL;
				rb_erase(&path->rb_node, &priv->path_tree);
				list_del(&path->list);
				ipoib_path_free(priv, path);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_cm_tx_destroy(p);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

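/*
 * Deferred handler for packets that exceeded the connection MTU:
 * report the path MTU back to the stack via ICMP "fragmentation
 * needed" (IPv4) or ICMPv6 "packet too big", and drop anything else.
 */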
static void ipoib_cm_mb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.mb_task);
	struct mbuf *mb;
	unsigned long flags;
#if defined(INET) || defined(INET6)
	unsigned mtu = priv->mcast_mtu;
#endif
	uint16_t proto;

	spin_lock_irqsave(&priv->lock, flags);

	for (;;) {
		IF_DEQUEUE(&priv->cm.mb_queue, mb);
		if (mb == NULL)
			break;
		spin_unlock_irqrestore(&priv->lock, flags);

		proto = htons(*mtod(mb, uint16_t *));
		m_adj(mb, IPOIB_ENCAP_LEN);
		switch (proto) {
#if defined(INET)
		case ETHERTYPE_IP:
			icmp_error(mb, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, mtu);
			break;
#endif
#if defined(INET6)
		case ETHERTYPE_IPV6:
			icmp6_error(mb, ICMP6_PACKET_TOO_BIG, 0, mtu);
			break;
#endif
		default:
			m_freem(mb);
		}

		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

void
ipoib_cm_mb_too_long(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int mtu)
{
	int e = priv->cm.mb_queue.ifq_len;

	IF_ENQUEUE(&priv->cm.mb_queue, mb);
	if (e == 0)
		queue_work(ipoib_workqueue, &priv->cm.mb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task));
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static void ipoib_cm_create_srq(struct ipoib_dev_priv *priv, int max_sge)
{
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}

	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}

int ipoib_cm_dev_init(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;
	int i;
	int max_srq_sge;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.mb_task, ipoib_cm_mb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	bzero(&priv->cm.mb_queue, sizeof(priv->cm.mb_queue));
	mtx_init(&priv->cm.mb_queue.ifq_mtx,
	    dev->if_xname, "if send queue", MTX_DEF);

	max_srq_sge = priv->ca->attrs.max_srq_sge;

	ipoib_dbg(priv, "max_srq_sge=%d\n", max_srq_sge);

	max_srq_sge = min_t(int, IPOIB_CM_RX_SG, max_srq_sge);
	ipoib_cm_create_srq(priv, max_srq_sge);
	if (ipoib_cm_has_srq(priv)) {
		priv->cm.max_cm_mtu = max_srq_sge * MJUMPAGESIZE;
		priv->cm.num_frags = max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MAX_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(priv, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(priv)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_mb(priv, &priv->cm.srq_ring[i])) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(priv, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -EIO;
			}
		}
	}

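	/*
	 * Flag connected-mode support in the first byte of the
	 * link-layer address, which is what IPOIB_CM_SUPPORTED()
	 * checks and what peers see during address resolution.
	 */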
	IF_LLADDR(priv->dev)[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(priv, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;

	mtx_destroy(&priv->cm.mb_queue.ifq_mtx);
}

#endif /* CONFIG_INFINIBAND_IPOIB_CM */