ipoib_ib.c revision 369540
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ipoib.h"

#include <rdma/ib_cache.h>

#include <security/mac/mac_framework.h>

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct ipoib_dev_priv *priv,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->priv = priv;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(priv, "Created ah %p\n", ah->ah);

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ah->priv;

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

void
ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req)
{
	struct mbuf *m;
	int i;

	for (i = 0, m = rx_req->mb; m != NULL; m = m->m_next, i++)
		ib_dma_unmap_single(priv->ca, rx_req->mapping[i], m->m_len,
		    DMA_FROM_DEVICE);
}

void
ipoib_dma_mb(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int length)
{

	m_adj(mb, -(mb->m_pkthdr.len - length));
}

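/*
 * RX buffer setup: ipoib_alloc_map_mb() below allocates an mbuf chain
 * large enough for one receive and DMA-maps every fragment, recording
 * the bus addresses in rx_req->mapping[] in chain order.  The same
 * chain walk recovers those addresses when building the scatter list
 * in ipoib_ib_post_receive() and when unmapping in ipoib_dma_unmap_rx().
 */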
struct mbuf *
ipoib_alloc_map_mb(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req,
    int size, int max_frags)
{
	struct mbuf *mb, *m;
	int i, j;

	rx_req->mb = NULL;
	mb = m_getm2(NULL, size, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mb == NULL)
		return (NULL);
	for (i = 0, m = mb; m != NULL; m = m->m_next, i++) {
		MPASS(i < max_frags);

		m->m_len = M_SIZE(m);
		mb->m_pkthdr.len += m->m_len;
		rx_req->mapping[i] = ib_dma_map_single(priv->ca,
		    mtod(m, void *), m->m_len, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca,
		    rx_req->mapping[i])))
			goto error;
	}
	rx_req->mb = mb;
	return (mb);
error:
	for (j = 0, m = mb; j < i; m = m->m_next, j++)
		ib_dma_unmap_single(priv->ca, rx_req->mapping[j], m->m_len,
		    DMA_FROM_DEVICE);
	m_freem(mb);
	return (NULL);
}

static int ipoib_ib_post_receive(struct ipoib_dev_priv *priv, int id)
{
	struct ipoib_rx_buf *rx_req;
	struct ib_recv_wr *bad_wr;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = &priv->rx_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->rx_sge[i].addr = rx_req->mapping[i];
		priv->rx_sge[i].length = m->m_len;
	}
	priv->rx_wr.num_sge = i;
	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, &priv->rx_ring[id]);
		m_freem(priv->rx_ring[id].mb);
		priv->rx_ring[id].mb = NULL;
	}

	return ret;
}

static struct mbuf *
ipoib_alloc_rx_mb(struct ipoib_dev_priv *priv, int id)
{
	return ipoib_alloc_map_mb(priv, &priv->rx_ring[id],
	    priv->max_ib_mtu + IB_GRH_BYTES, IPOIB_UD_RX_SG);
}

static int ipoib_ib_post_receives(struct ipoib_dev_priv *priv)
{
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_mb(priv, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(priv, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

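/*
 * RX completion path: the ring slot is recovered by masking the
 * IPOIB_OP_RECV tag out of wc->wr_id.  On success the slot is refilled
 * with a fresh mbuf before the received one is passed up the stack; if
 * that allocation fails, the old buffer is kept and the packet counted
 * as an input drop.  Either way the slot is reposted to the HCA via
 * the "repost" label.
 */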
static void
ipoib_ib_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_rx_buf saverx;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct ifnet *dev = priv->dev;
	struct ipoib_header *eh;
	struct mbuf *mb;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	mb = priv->rx_ring[wr_id].mb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
			goto repost;
		}
		if (mb) {
			ipoib_dma_unmap_rx(priv, &priv->rx_ring[wr_id]);
			m_freem(mb);
			priv->rx_ring[wr_id].mb = NULL;
		}
		return;
	}

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(&saverx, &priv->rx_ring[wr_id], sizeof(saverx));
	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_mb(priv, wr_id))) {
		memcpy(&priv->rx_ring[wr_id], &saverx, sizeof(saverx));
		if_inc_counter(dev, IFCOUNTER_IQDROPS, 1);
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_dma_unmap_rx(priv, &saverx);
	ipoib_dma_mb(priv, mb, wc->byte_len);

	if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);
	mb->m_pkthdr.rcvif = dev;
	m_adj(mb, sizeof(struct ib_grh) - INFINIBAND_ALEN);
	eh = mtod(mb, struct ipoib_header *);
	bzero(eh->hwaddr, 4);	/* Zero the queue pair, only dgid is in grh */

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	dev->if_input(dev, mb);

repost:
	if (unlikely(ipoib_ib_post_receive(priv, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max)
{
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m, *p;
	int error;
	int i;

	for (m = mb, p = NULL, i = 0; m != NULL; p = m, m = m->m_next, i++) {
		if (m->m_len != 0)
			continue;
		if (p == NULL)
			panic("ipoib_dma_map_tx: First mbuf empty\n");
		p->m_next = m_free(m);
		m = p;
		i--;
	}
	i--;
	if (i >= max) {
		tx_req->mb = mb = m_defrag(mb, M_NOWAIT);
		if (mb == NULL)
			return -EIO;
		for (m = mb, i = 0; m != NULL; m = m->m_next, i++);
		if (i >= max)
			return -EIO;
	}
	error = 0;
	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		mapping[i] = ib_dma_map_single(ca, mtod(m, void *),
					       m->m_len, DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i]))) {
			error = -EIO;
			break;
		}
	}
	if (error) {
		int end;

		end = i;
		for (m = mb, i = 0; i < end; m = m->m_next, i++)
			ib_dma_unmap_single(ca, mapping[i], m->m_len,
			    DMA_TO_DEVICE);
	}
	return error;
}

void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++)
		ib_dma_unmap_single(ca, mapping[i], m->m_len, DMA_TO_DEVICE);
}

static void ipoib_ib_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ifnet *dev = priv->dev;
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);

	m_freem(tx_req->mb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

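/*
 * ipoib_poll_tx() reaps up to MAX_SEND_CQE send completions per call
 * and returns nonzero when it drained a full batch, i.e. when more
 * completions may still be queued; callers therefore loop on it until
 * it returns zero.  CM completions are told apart from UD ones by the
 * IPOIB_OP_CM bit in the work request id.
 */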
int
ipoib_poll_tx(struct ipoib_dev_priv *priv, bool do_start)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i) {
		struct ib_wc *wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(priv, wc);
		else
			ipoib_ib_handle_tx_wc(priv, wc);
	}

	if (do_start && n != 0)
		ipoib_start_locked(priv->dev, priv);

	return n == MAX_SEND_CQE;
}

static void
ipoib_poll(struct ipoib_dev_priv *priv)
{
	int n, i;

poll_more:
	spin_lock(&priv->drain_lock);
	for (;;) {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if ((wc->wr_id & IPOIB_OP_RECV) == 0)
				panic("ipoib_poll: Bad wr_id 0x%jX\n",
				    (intmax_t)wc->wr_id);
			if (wc->wr_id & IPOIB_OP_CM)
				ipoib_cm_handle_rx_wc(priv, wc);
			else
				ipoib_ib_handle_rx_wc(priv, wc);
		}

		if (n != IPOIB_NUM_WC)
			break;
	}
	spin_unlock(&priv->drain_lock);

	if (ib_req_notify_cq(priv->recv_cq,
	    IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto poll_more;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = dev_ptr;

	ipoib_poll(priv);
}

static void drain_tx_cq(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	spin_lock(&priv->lock);
	while (ipoib_poll_tx(priv, true))
		; /* nothing */

	if (dev->if_drv_flags & IFF_DRV_OACTIVE)
		mod_timer(&priv->poll_timer, jiffies + 1);

	spin_unlock(&priv->lock);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = dev_ptr;

	mod_timer(&priv->poll_timer, jiffies);
}

static inline int
post_send(struct ipoib_dev_priv *priv, unsigned int wr_id,
    struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head,
    int hlen)
{
	struct ib_send_wr *bad_wr;
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->tx_sge[i].addr = mapping[i];
		priv->tx_sge[i].length = m->m_len;
	}
	priv->tx_wr.wr.num_sge = i;
	priv->tx_wr.wr.wr_id = wr_id;
	priv->tx_wr.remote_qpn = qpn;
	priv->tx_wr.ah = address;

	if (head) {
		priv->tx_wr.mss = 0; /* XXX mb_shinfo(mb)->gso_size; */
		priv->tx_wr.header = head;
		priv->tx_wr.hlen = hlen;
		priv->tx_wr.wr.opcode = IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

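/*
 * ipoib_send() queues one datagram-mode mbuf.  tx_head and tx_tail are
 * free-running counters; a ring slot is selected with
 * "tx_head & (ipoib_sendq_size - 1)", which relies on ipoib_sendq_size
 * being a power of two.  The ring counts as full once tx_outstanding
 * reaches ipoib_sendq_size, at which point the ifnet is marked
 * IFF_DRV_OACTIVE until completions drain half the ring (see
 * ipoib_ib_handle_tx_wc()).
 */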
void
ipoib_send(struct ipoib_dev_priv *priv, struct mbuf *mb,
    struct ipoib_ah *address, u32 qpn)
{
	struct ifnet *dev = priv->dev;
	struct ipoib_tx_buf *tx_req;
	int hlen;
	void *phead;

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (ipoib_poll_tx(priv, false))
			; /* nothing */

	m_adj(mb, sizeof (struct ipoib_pseudoheader));
	if (0 /* XXX segment offload mb_is_gso(mb) */) {
		/* XXX hlen = mb_transport_offset(mb) + tcp_hdrlen(mb); */
		phead = mtod(mb, void *);
		if (mb->m_len < hlen) {
			ipoib_warn(priv, "linear data too small\n");
			if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
			m_freem(mb);
			return;
		}
		m_adj(mb, hlen);
	} else {
		if (unlikely(mb->m_pkthdr.len - IPOIB_ENCAP_LEN > priv->mcast_mtu)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   mb->m_pkthdr.len, priv->mcast_mtu);
			if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
			ipoib_cm_mb_too_long(priv, mb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       mb->m_pkthdr.len, address, qpn);

	/*
	 * We put the mb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->mb = mb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req, IPOIB_UD_TX_SG))) {
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		if (tx_req->mb)
			m_freem(tx_req->mb);
		return;
	}

	if (mb->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP))
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		dev->if_drv_flags |= IFF_DRV_OACTIVE;
	}

	if (unlikely(post_send(priv,
	    priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn,
	    tx_req, phead, hlen))) {
		ipoib_warn(priv, "post_send failed\n");
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		m_freem(mb);
		if (dev->if_drv_flags & IFF_DRV_OACTIVE)
			dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} else {
		address->last_send = priv->tx_head;
		++priv->tx_head;
	}
}

static void __ipoib_reap_ah(struct ipoib_dev_priv *priv)
{
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);

	__ipoib_reap_ah(priv);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   HZ);
}

static void ipoib_ah_dev_cleanup(struct ipoib_dev_priv *priv)
{
	unsigned long begin;

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(priv);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct ipoib_dev_priv *)ctx);
}

int ipoib_ib_dev_open(struct ipoib_dev_priv *priv)
{
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(priv, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(priv, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct ipoib_dev_priv *priv)
{
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct ipoib_dev_priv *priv)
{

	ipoib_pkey_dev_check_presence(priv);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(priv);
}

int ipoib_ib_dev_down(struct ipoib_dev_priv *priv, int flush)
{

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	if_link_state_change(priv->dev, LINK_STATE_DOWN);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(priv, flush);
	ipoib_mcast_dev_flush(priv);

	ipoib_flush_paths(priv);

	return 0;
}

static int recvs_pending(struct ipoib_dev_priv *priv)
{
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].mb)
			++pending;

	return pending;
}

static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
					struct ib_qp *qp,
					enum ib_qp_state new_state)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP (%d)\n", __func__, ret);
		return;
	}

	/* print according to the new state and the previous state */
	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) {
		ipoib_dbg(priv, "Failed to modify QP %d->%d, acceptable\n",
			  qp_attr.qp_state, new_state);
	} else {
		ipoib_warn(priv, "Failed to modify QP %d->%d\n",
			   qp_attr.qp_state, new_state);
	}
}

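/*
 * Drain both CQs while shutting down.  Successful receive completions
 * are downgraded to IB_WC_WR_FLUSH_ERR before dispatch so that
 * ipoib_ib_handle_rx_wc() frees the buffers instead of handing packets
 * to a stack that is going away; drain_lock keeps this from racing
 * with the normal ipoib_poll() path.
 */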
void ipoib_drain_cq(struct ipoib_dev_priv *priv)
{
	int i, n;

	spin_lock(&priv->drain_lock);
	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if ((priv->ibwc[i].wr_id & IPOIB_OP_RECV) == 0)
				panic("ipoib_drain_cq: Bad wrid 0x%jX\n",
				    (intmax_t)priv->ibwc[i].wr_id);
			if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
				ipoib_cm_handle_rx_wc(priv, priv->ibwc + i);
			else
				ipoib_ib_handle_rx_wc(priv, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);
	spin_unlock(&priv->drain_lock);

	spin_lock(&priv->lock);
	while (ipoib_poll_tx(priv, true))
		; /* nothing */

	spin_unlock(&priv->lock);
}

int ipoib_ib_dev_stop(struct ipoib_dev_priv *priv, int flush)
{
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	ipoib_cm_dev_stop(priv);

	/*
	 * Move our QP to the error state and then reinitialize
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(priv)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(priv));

			/*
			 * Assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
				    (ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				m_freem(tx_req->mb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->mb)
					continue;
				ipoib_dma_unmap_rx(priv, &priv->rx_ring[i]);
				m_freem(rx_req->mb);
				rx_req->mb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(priv);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	ipoib_ah_dev_cleanup(priv);

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{
	struct ifnet *dev = priv->dev;

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(priv, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) priv);

	if (dev->if_flags & IFF_UP) {
		if (ipoib_ib_dev_open(priv)) {
			ipoib_transport_dev_cleanup(priv);
			return -ENODEV;
		}
	}

	return 0;
}

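/*
 * Flush levels, as handled by the code below: LIGHT invalidates cached
 * paths and flushes multicast state; NORMAL additionally downs and
 * re-ups the IB side; HEAVY additionally restarts the QP and is the
 * only level that re-checks the P_Key table index (the QP is restarted
 * only if that index actually changed).
 */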
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(priv, 0);
			ipoib_ib_dev_stop(priv, 0);
			if (ipoib_pkey_dev_delay_open(priv))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(priv);
		ipoib_mcast_dev_flush(priv);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(priv, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(priv, 0);
		ipoib_ib_dev_open(priv);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, so don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(priv);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}

void ipoib_ib_dev_cleanup(struct ipoib_dev_priv *priv)
{

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(priv, 1);
	ipoib_mcast_dev_flush(priv);

	ipoib_ah_dev_cleanup(priv);
	ipoib_transport_dev_cleanup(priv);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);

	ipoib_pkey_dev_check_presence(priv);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(priv);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct ipoib_dev_priv *priv)
{

	/*
	 * Look for the interface pkey value in the IB Port P_Key table
	 * and set the interface pkey assignment flag.
	 */
	ipoib_pkey_dev_check_presence(priv);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}