ipoib_ib.c revision 353183
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ipoib.h"

#include <rdma/ib_cache.h>

#include <security/mac/mac_framework.h>

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct ipoib_dev_priv *priv,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->priv = priv;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(priv, "Created ah %p\n", ah->ah);

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ah->priv;

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

void
ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req)
{
	struct mbuf *m;
	int i;

	for (i = 0, m = rx_req->mb; m != NULL; m = m->m_next, i++)
		ib_dma_unmap_single(priv->ca, rx_req->mapping[i], m->m_len,
		    DMA_FROM_DEVICE);
}

void
ipoib_dma_mb(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int length)
{

	m_adj(mb, -(mb->m_pkthdr.len - length));
}

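/*
 * Allocate an mbuf chain of at least "size" bytes and DMA-map each
 * segment for receive.  On a mapping failure every segment mapped so
 * far is unmapped and the chain is freed, so the caller sees either a
 * fully mapped chain or NULL.
 */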
struct mbuf *
ipoib_alloc_map_mb(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req,
    int size)
{
	struct mbuf *mb, *m;
	int i, j;

	rx_req->mb = NULL;
	mb = m_getm2(NULL, size, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mb == NULL)
		return (NULL);
	for (i = 0, m = mb; m != NULL; m = m->m_next, i++) {
		m->m_len = M_SIZE(m);
		mb->m_pkthdr.len += m->m_len;
		rx_req->mapping[i] = ib_dma_map_single(priv->ca,
		    mtod(m, void *), m->m_len, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca,
		    rx_req->mapping[i])))
			goto error;
	}
	rx_req->mb = mb;
	return (mb);
error:
	for (j = 0, m = mb; j < i; m = m->m_next, j++)
		ib_dma_unmap_single(priv->ca, rx_req->mapping[j], m->m_len,
		    DMA_FROM_DEVICE);
	m_freem(mb);
	return (NULL);
}

static int ipoib_ib_post_receive(struct ipoib_dev_priv *priv, int id)
{
	struct ipoib_rx_buf *rx_req;
	struct ib_recv_wr *bad_wr;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = &priv->rx_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->rx_sge[i].addr = rx_req->mapping[i];
		priv->rx_sge[i].length = m->m_len;
	}
	priv->rx_wr.num_sge = i;
	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, &priv->rx_ring[id]);
		m_freem(priv->rx_ring[id].mb);
		priv->rx_ring[id].mb = NULL;
	}

	return ret;
}

static struct mbuf *
ipoib_alloc_rx_mb(struct ipoib_dev_priv *priv, int id)
{

	return ipoib_alloc_map_mb(priv, &priv->rx_ring[id],
	    priv->max_ib_mtu + IB_GRH_BYTES);
}

static int ipoib_ib_post_receives(struct ipoib_dev_priv *priv)
{
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_mb(priv, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(priv, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

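/*
 * Handle one datagram receive completion.  The low bits of the wr_id
 * select the rx_ring slot.  A good packet is unmapped, trimmed to the
 * completed length, and passed to if_input() once a replacement
 * buffer has been allocated; flush errors during shutdown just
 * release the buffer.
 */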
static void
ipoib_ib_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_rx_buf saverx;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct ifnet *dev = priv->dev;
	struct ipoib_header *eh;
	struct mbuf *mb;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	mb = priv->rx_ring[wr_id].mb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
			goto repost;
		}
		if (mb) {
			ipoib_dma_unmap_rx(priv, &priv->rx_ring[wr_id]);
			m_freem(mb);
			priv->rx_ring[wr_id].mb = NULL;
		}
		return;
	}

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(&saverx, &priv->rx_ring[wr_id], sizeof(saverx));
	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_mb(priv, wr_id))) {
		memcpy(&priv->rx_ring[wr_id], &saverx, sizeof(saverx));
		if_inc_counter(dev, IFCOUNTER_IQDROPS, 1);
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_dma_unmap_rx(priv, &saverx);
	ipoib_dma_mb(priv, mb, wc->byte_len);

	if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);
	mb->m_pkthdr.rcvif = dev;
	m_adj(mb, sizeof(struct ib_grh) - INFINIBAND_ALEN);
	eh = mtod(mb, struct ipoib_header *);
	bzero(eh->hwaddr, 4);	/* Zero the queue pair, only dgid is in grh */

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	dev->if_input(dev, mb);

repost:
	if (unlikely(ipoib_ib_post_receive(priv, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max)
{
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m, *p;
	int error;
	int i;

	for (m = mb, p = NULL, i = 0; m != NULL; p = m, m = m->m_next, i++) {
		if (m->m_len != 0)
			continue;
		if (p == NULL)
			panic("ipoib_dma_map_tx: First mbuf empty\n");
		p->m_next = m_free(m);
		m = p;
		i--;
	}
	i--;
	if (i >= max) {
		tx_req->mb = mb = m_defrag(mb, M_NOWAIT);
		if (mb == NULL)
			return -EIO;
		for (m = mb, i = 0; m != NULL; m = m->m_next, i++);
		if (i >= max)
			return -EIO;
	}
	error = 0;
	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		mapping[i] = ib_dma_map_single(ca, mtod(m, void *),
					       m->m_len, DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i]))) {
			error = -EIO;
			break;
		}
	}
	if (error) {
		int end;

		end = i;
		for (m = mb, i = 0; i < end; m = m->m_next, i++)
			ib_dma_unmap_single(ca, mapping[i], m->m_len,
					    DMA_TO_DEVICE);
	}
	return error;
}

void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++)
		ib_dma_unmap_single(ca, mapping[i], m->m_len, DMA_TO_DEVICE);
}

static void ipoib_ib_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ifnet *dev = priv->dev;
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);

	m_freem(tx_req->mb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

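/*
 * Poll up to MAX_SEND_CQE send completions, dispatching each to the
 * connected-mode or datagram handler based on the IPOIB_OP_CM bit in
 * the wr_id.  Returns nonzero if the CQ may hold more completions, so
 * callers loop until it returns zero.
 */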
int
ipoib_poll_tx(struct ipoib_dev_priv *priv, bool do_start)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i) {
		struct ib_wc *wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(priv, wc);
		else
			ipoib_ib_handle_tx_wc(priv, wc);
	}

	if (do_start && n != 0)
		ipoib_start_locked(priv->dev, priv);

	return n == MAX_SEND_CQE;
}

static void
ipoib_poll(struct ipoib_dev_priv *priv)
{
	int n, i;

poll_more:
	spin_lock(&priv->drain_lock);
	for (;;) {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if ((wc->wr_id & IPOIB_OP_RECV) == 0)
				panic("ipoib_poll: Bad wr_id 0x%jX\n",
				    (intmax_t)wc->wr_id);
			if (wc->wr_id & IPOIB_OP_CM)
				ipoib_cm_handle_rx_wc(priv, wc);
			else
				ipoib_ib_handle_rx_wc(priv, wc);
		}

		if (n != IPOIB_NUM_WC)
			break;
	}
	spin_unlock(&priv->drain_lock);

	if (ib_req_notify_cq(priv->recv_cq,
	    IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto poll_more;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = dev_ptr;

	ipoib_poll(priv);
}

static void drain_tx_cq(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	spin_lock(&priv->lock);
	while (ipoib_poll_tx(priv, true))
		; /* nothing */

	if (dev->if_drv_flags & IFF_DRV_OACTIVE)
		mod_timer(&priv->poll_timer, jiffies + 1);

	spin_unlock(&priv->lock);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = dev_ptr;

	mod_timer(&priv->poll_timer, jiffies);
}

static inline int
post_send(struct ipoib_dev_priv *priv, unsigned int wr_id,
    struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head,
    int hlen)
{
	struct ib_send_wr *bad_wr;
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->tx_sge[i].addr = mapping[i];
		priv->tx_sge[i].length = m->m_len;
	}
	priv->tx_wr.wr.num_sge = i;
	priv->tx_wr.wr.wr_id = wr_id;
	priv->tx_wr.remote_qpn = qpn;
	priv->tx_wr.ah = address;

	if (head) {
		priv->tx_wr.mss = 0; /* XXX mb_shinfo(mb)->gso_size; */
		priv->tx_wr.header = head;
		priv->tx_wr.hlen = hlen;
		priv->tx_wr.wr.opcode = IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

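/*
 * Enqueue one datagram on the UD QP.  The ring slot comes from
 * "tx_head & (ipoib_sendq_size - 1)", which assumes the send queue
 * size is a power of two so the mask is a cheap modulo (e.g. with a
 * 128-entry ring, head 130 maps to slot 2).  When the ring fills,
 * IFF_DRV_OACTIVE is set to throttle the interface; the completion
 * path clears it once the ring drains back to half full.
 */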
void
ipoib_send(struct ipoib_dev_priv *priv, struct mbuf *mb,
    struct ipoib_ah *address, u32 qpn)
{
	struct ifnet *dev = priv->dev;
	struct ipoib_tx_buf *tx_req;
	int hlen;
	void *phead;

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (ipoib_poll_tx(priv, false))
			; /* nothing */

	m_adj(mb, sizeof (struct ipoib_pseudoheader));
	if (0 /* XXX segment offload mb_is_gso(mb) */) {
		/* XXX hlen = mb_transport_offset(mb) + tcp_hdrlen(mb); */
		phead = mtod(mb, void *);
		if (mb->m_len < hlen) {
			ipoib_warn(priv, "linear data too small\n");
			if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
			m_freem(mb);
			return;
		}
		m_adj(mb, hlen);
	} else {
		if (unlikely(mb->m_pkthdr.len - IPOIB_ENCAP_LEN > priv->mcast_mtu)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   mb->m_pkthdr.len, priv->mcast_mtu);
			if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
			ipoib_cm_mb_too_long(priv, mb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       mb->m_pkthdr.len, address, qpn);

	/*
	 * We put the mb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->mb = mb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req, IPOIB_UD_TX_SG))) {
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		if (tx_req->mb)
			m_freem(tx_req->mb);
		return;
	}

	if (mb->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP))
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		dev->if_drv_flags |= IFF_DRV_OACTIVE;
	}

	if (unlikely(post_send(priv,
	    priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn,
	    tx_req, phead, hlen))) {
		ipoib_warn(priv, "post_send failed\n");
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		m_freem(mb);
		if (dev->if_drv_flags & IFF_DRV_OACTIVE)
			dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} else {
		address->last_send = priv->tx_head;
		++priv->tx_head;
	}
}

static void __ipoib_reap_ah(struct ipoib_dev_priv *priv)
{
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);

	__ipoib_reap_ah(priv);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   HZ);
}

static void ipoib_ah_dev_cleanup(struct ipoib_dev_priv *priv)
{
	unsigned long begin;

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(priv);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct ipoib_dev_priv *)ctx);
}

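/*
 * Bring the IB transport up for this interface: look up the P_Key
 * index, initialize the UD QP, pre-post the entire receive ring, open
 * connected mode, and schedule the periodic address-handle reaper.
 * Failures after QP setup unwind through ipoib_ib_dev_stop().
 */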
int ipoib_ib_dev_open(struct ipoib_dev_priv *priv)
{
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(priv, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(priv, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct ipoib_dev_priv *priv)
{
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct ipoib_dev_priv *priv)
{

	ipoib_pkey_dev_check_presence(priv);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(priv);
}

int ipoib_ib_dev_down(struct ipoib_dev_priv *priv, int flush)
{

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	if_link_state_change(priv->dev, LINK_STATE_DOWN);

	/* Shut down the P_Key thread if it is still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(priv, flush);
	ipoib_mcast_dev_flush(priv);

	ipoib_flush_paths(priv);

	return 0;
}

static int recvs_pending(struct ipoib_dev_priv *priv)
{
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].mb)
			++pending;

	return pending;
}

static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
					struct ib_qp *qp,
					enum ib_qp_state new_state)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP (%d)\n", __func__, ret);
		return;
	}

	/* print according to the new state and the previous state */
	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) {
		ipoib_dbg(priv, "Failed to modify QP %d->%d, acceptable\n",
			  qp_attr.qp_state, new_state);
	} else {
		ipoib_warn(priv, "Failed to modify QP %d->%d\n",
			   qp_attr.qp_state, new_state);
	}
}

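/*
 * Drain all outstanding receive completions while the device is going
 * down.  Successful completions are demoted to flush errors here so
 * the handlers release their buffers instead of handing packets to a
 * stack that is no longer expecting them.
 */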
void ipoib_drain_cq(struct ipoib_dev_priv *priv)
{
	int i, n;

	spin_lock(&priv->drain_lock);
	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if ((priv->ibwc[i].wr_id & IPOIB_OP_RECV) == 0)
				panic("ipoib_drain_cq: Bad wrid 0x%jX\n",
				    (intmax_t)priv->ibwc[i].wr_id);
			if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
				ipoib_cm_handle_rx_wc(priv, priv->ibwc + i);
			else
				ipoib_ib_handle_rx_wc(priv, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);
	spin_unlock(&priv->drain_lock);

	spin_lock(&priv->lock);
	while (ipoib_poll_tx(priv, true))
		; /* nothing */

	spin_unlock(&priv->lock);
}

int ipoib_ib_dev_stop(struct ipoib_dev_priv *priv, int flush)
{
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	ipoib_cm_dev_stop(priv);

	/*
	 * Move our QP to the error state and then reinitialize when
	 * all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(priv)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(priv));

			/*
			 * Assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
				    (ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				m_freem(tx_req->mb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->mb)
					continue;
				ipoib_dma_unmap_rx(priv, &priv->rx_ring[i]);
				m_freem(rx_req->mb);
				rx_req->mb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(priv);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	ipoib_ah_dev_cleanup(priv);

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{
	struct ifnet *dev = priv->dev;

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(priv, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) priv);

	if (dev->if_flags & IFF_UP) {
		if (ipoib_ib_dev_open(priv)) {
			ipoib_transport_dev_cleanup(priv);
			return -ENODEV;
		}
	}

	return 0;
}

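/*
 * Common body for the light/normal/heavy flush work items.  A light
 * flush invalidates paths and multicast state, a normal flush also
 * takes the IB side down and back up, and a heavy flush additionally
 * restarts the QP so that a P_Key table change takes effect.  Child
 * (VLAN) interfaces are flushed first since they may be up even when
 * the parent is down.
 */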
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(priv, 0);
			ipoib_ib_dev_stop(priv, 0);
			if (ipoib_pkey_dev_delay_open(priv))
				return;
		}

		/* restart QP only if the P_Key index changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(priv);
		ipoib_mcast_dev_flush(priv);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(priv, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(priv, 0);
		ipoib_ib_dev_open(priv);
	}

	/*
	 * The device could have been brought down between the start and
	 * when we get here; don't bring it back up if it's not
	 * configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(priv);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}

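/*
 * Final IB-side teardown: stop the multicast thread, flush multicast
 * and dead address-handle state, then release the transport resources
 * set up by ipoib_ib_dev_init().
 */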
void ipoib_ib_dev_cleanup(struct ipoib_dev_priv *priv)
{

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(priv, 1);
	ipoib_mcast_dev_flush(priv);

	ipoib_ah_dev_cleanup(priv);
	ipoib_transport_dev_cleanup(priv);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of a delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);

	ipoib_pkey_dev_check_presence(priv);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(priv);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct ipoib_dev_priv *priv)
{

	/*
	 * Look for the interface pkey value in the IB Port P_Key table
	 * and set the interface pkey assignment flag.
	 */
	ipoib_pkey_dev_check_presence(priv);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}