en_tx.c revision 225736
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "mlx4_en.h"

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/vmalloc.h>

#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <sys/mbuf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
};

static int inline_thold = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
				  M_WAITOK, &ring->tx_lock.m);
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kmalloc(tmp, GFP_KERNEL);
	if (!ring->tx_info) {
		en_err(priv, "Failed allocating tx_info ring\n");
		err = -ENOMEM;
		goto err_tx;
	}
	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		en_err(priv, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

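	/*
	 * Make the hardware queue buffer reachable through ring->buf before
	 * the QP is set up below; on failure the error path unwinds all of
	 * the allocations made above.
	 */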
	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 256, &ring->qpn);
	if (err) {
		en_err(priv, "Failed reserving qp for tx ring.\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
	if (err) {
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	buf_ring_free(ring->br, M_DEVBUF);
	kfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	buf_ring_free(ring->br, M_DEVBUF);
	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	kfree(ring->tx_info);
	ring->tx_info = NULL;
	mtx_destroy(&ring->tx_lock.m);
	mtx_destroy(&ring->comp_lock.m);
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

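/*
 * Release a completed descriptor: unmap its DMA segments (unless the data
 * was sent inline), stamp the freed TXBBs with the ownership value for the
 * next pass around the ring, and free the attached mbuf.  Returns the number
 * of TXBBs the descriptor occupied.
 */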
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct mbuf *mb = tx_info->mb;
	void *end = ring->buf + ring->buf_size;
	int frags = tx_info->nr_segs;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			for (i = 0; i < frags; i++) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					data[i].byte_count, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = (struct mlx4_wqe_data_seg *) ring->buf;
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					data->byte_count, PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	m_freem(mb);
	return tx_info->nr_txbb;
}


int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
					ring->cons & ring->size_mask,
					!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
{
	int block = 8 / ring_num;
	int extra = 8 - (block * ring_num);
	int num = 0;
	u16 ring = 1;
	int prio;

	if (ring_num == 1) {
		for (prio = 0; prio < 8; prio++)
			prio_map[prio] = 0;
		return;
	}

	for (prio = 0; prio < 8; prio++) {
		if (extra && (num == block + 1)) {
			ring++;
			num = 0;
			extra--;
		} else if (!extra && (num == block)) {
			ring++;
			num = 0;
		}
		prio_map[prio] = ring;
		en_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
		num++;
	}
}

static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe = cq->buf;
	u16 index;
	u16 new_index;
	u32 txbbs_skipped = 0;
	u32 cq_last_sav;

	/* index always points to the first TXBB of the last polled descriptor */
	index = ring->cons & ring->size_mask;
	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	if (index == new_index)
		return;

	if (!priv->port_up)
		return;

	/*
	 * We use a two-stage loop:
	 * - the first samples the HW-updated CQE
	 * - the second frees TXBBs until the last sample
	 * This lets us amortize CQE cache misses, while still polling the CQ
	 * until it is quiescent.
	 */
	cq_last_sav = mcq->cons_index;
	do {
		do {
			/* Skip over last polled CQE */
			index = (index + ring->last_nr_txbb) & ring->size_mask;
			txbbs_skipped += ring->last_nr_txbb;

			/* Poll next CQE */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
						priv, ring, index,
						!!((ring->cons + txbbs_skipped) &
						ring->size));
			++mcq->cons_index;

		} while (index != new_index);

		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	} while (index != new_index);
	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
			 (u32) (mcq->cons_index - cq_last_sav));

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wake up the Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		    ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
				atomic_clear_int(&dev->if_drv_flags,
				    IFF_DRV_OACTIVE);
			priv->port_stats.wake_queue++;
		}
	}
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}


void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

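/*
 * Opportunistic completion polling from the transmit path: make sure a timer
 * is pending so completions are still reaped if the interface goes idle, and
 * every MLX4_EN_TX_POLL_MODER posted packets try to process the CQ directly.
 */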
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock(&ring->comp_lock);
		}
}

static int is_inline(struct mbuf *mb)
{

	if (inline_thold && mb->m_pkthdr.len <= inline_thold &&
	    (mb->m_pkthdr.csum_flags & CSUM_TSO) == 0)
		return 1;

	return 0;
}

static int inline_size(struct mbuf *mb)
{
	int len;

	len = mb->m_pkthdr.len;
	if (len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

static int get_head_size(struct mbuf *mb)
{
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	int len;

	len = ETHER_HDR_LEN;
	if (mb->m_len < len + sizeof(struct ip))
		return (0);
	ip = (struct ip *)(mtod(mb, char *) + len);
	if (ip->ip_p != IPPROTO_TCP)
		return (0);
	ip_hlen = ip->ip_hl << 2;
	len += ip_hlen;
	if (mb->m_len < len + sizeof(struct tcphdr))
		return (0);
	th = (struct tcphdr *)(mtod(mb, char *) + len);
	tcp_hlen = th->th_off << 2;
	len += tcp_hlen;
	if (mb->m_len < len)
		return (0);
	return (len);
}

static int get_real_size(struct mbuf *mb, struct net_device *dev, int *segsp,
			 int *lso_header_size)
{
	struct mbuf *m;
	int nr_segs;

	nr_segs = 0;
	for (m = mb; m != NULL; m = m->m_next)
		if (m->m_len)
			nr_segs++;

	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		*lso_header_size = get_head_size(mb);
		if (*lso_header_size) {
			if (mb->m_len == *lso_header_size)
				nr_segs--;
			*segsp = nr_segs;
			return CTRL_SIZE + nr_segs * DS_SIZE +
			    ALIGN(*lso_header_size + 4, DS_SIZE);
		}
	} else
		*lso_header_size = 0;
	*segsp = nr_segs;
	if (is_inline(mb))
		return inline_size(mb);
	return (CTRL_SIZE + nr_segs * DS_SIZE);
}

static struct mbuf *mb_copy(struct mbuf *mb, int *offp, char *data, int len)
{
	int bytes;
	int off;

	off = *offp;
	while (len) {
		bytes = min(mb->m_len - off, len);
		if (bytes)
			memcpy(data, mb->m_data + off, bytes);
		len -= bytes;
		data += bytes;
		off += bytes;
		if (off == mb->m_len) {
			off = 0;
			mb = mb->m_next;
		}
	}
	*offp = off;
	return (mb);
}

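/*
 * For packets small enough to inline (see is_inline()), copy the payload
 * straight into the WQE as one or two inline segments instead of posting
 * gather entries that would require DMA mapping.
 */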
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct mbuf *mb,
			     int real_size, u16 *vlan_tag, int tx_ind)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
	int len;
	int off;

	off = 0;
	len = mb->m_pkthdr.len;
	if (len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | len);
		mb_copy(mb, &off, (void *)(inl + 1), len);
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		mb = mb_copy(mb, &off, (void *)(inl + 1), spc);
		inl = (void *) (inl + 1) + spc;
		mb_copy(mb, &off, (void *)(inl + 1), len - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_hash_entry *entry;
	struct ether_header *eth;
	struct tcphdr *th;
	struct ip *iph;
	u32 hash_index;
	int tx_ind = 0;
	u16 vlan_tag = 0;
	int len;

	/* Obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		vlan_tag = mb->m_pkthdr.ether_vtag;
		/* Set the Tx ring to use according to vlan priority */
		tx_ind = priv->tx_prio_map[vlan_tag >> 13];
		if (tx_ind)
			return tx_ind;
	}
	if (mb->m_len <
	    ETHER_HDR_LEN + sizeof(struct ip) + sizeof(struct tcphdr))
		return MLX4_EN_NUM_HASH_RINGS;
	eth = mtod(mb, struct ether_header *);
	/* Hashing is only done for TCP/IP or UDP/IP packets */
	if (be16_to_cpu(eth->ether_type) != ETHERTYPE_IP)
		return MLX4_EN_NUM_HASH_RINGS;
	len = ETHER_HDR_LEN;
	iph = (struct ip *)(mtod(mb, char *) + len);
	len += iph->ip_hl << 2;
	th = (struct tcphdr *)(mtod(mb, char *) + len);
	hash_index = be32_to_cpu(iph->ip_dst.s_addr) & MLX4_EN_TX_HASH_MASK;
	switch (iph->ip_p) {
	case IPPROTO_UDP:
		break;
	case IPPROTO_TCP:
		if (mb->m_len < len + sizeof(struct tcphdr))
			return MLX4_EN_NUM_HASH_RINGS;
		hash_index =
		    (hash_index ^ be16_to_cpu(th->th_dport ^ th->th_sport)) &
		    MLX4_EN_TX_HASH_MASK;
		break;
	default:
		return MLX4_EN_NUM_HASH_RINGS;
	}

	entry = &priv->tx_hash[hash_index];
	if (unlikely(!entry->cnt)) {
		tx_ind = hash_index & (MLX4_EN_NUM_HASH_RINGS / 2 - 1);
		if (2 * entry->small_pkts > entry->big_pkts)
			tx_ind += MLX4_EN_NUM_HASH_RINGS / 2;
		entry->small_pkts = entry->big_pkts = 0;
		entry->ring = tx_ind;
	}

	entry->cnt++;
	if (mb->m_pkthdr.len > MLX4_EN_SMALL_PKT_SIZE)
		entry->big_pkts++;
	else
		entry->small_pkts++;
	return entry->ring;
}

static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

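/*
 * Build and post one send WQE for the mbuf chain in *mbp on ring tx_ind.
 * Returns 0 on success, EBUSY when the ring is out of TXBBs (the mbuf is
 * left untouched so the caller can keep it queued), or EINVAL when the
 * packet is dropped (the mbuf is freed and *mbp is set to NULL).
 */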
static int mlx4_en_xmit(struct net_device *dev, int tx_ind, struct mbuf **mbp)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *m;
	int nr_txbb;
	int nr_segs;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index, bf_index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	bool bounce = false;
	struct mbuf *mb;
	int defrag = 1;

	ring = &priv->tx_ring[tx_ind];
	mb = *mbp;
	if (!priv->port_up)
		goto tx_drop;

retry:
	real_size = get_real_size(mb, dev, &nr_segs, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (defrag) {
			mb = m_defrag(*mbp, M_DONTWAIT);
			if (mb == NULL) {
				mb = *mbp;
				goto tx_drop;
			}
			*mbp = mb;
			defrag = 0;
			goto retry;
		}
		goto tx_drop;
	}

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* every full Tx ring stops queue */
		if (ring->blocked == 0)
			atomic_add_int(&priv->blocked, 1);
		atomic_set_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return EBUSY;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Prepare the ctrl segment, apart from opcode+ownership, which depends
	 * on whether LSO is used */
	if (mb->m_flags & M_VLANTAG)
		vlan_tag = mb->m_pkthdr.ether_vtag;
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
						MLX4_WQE_CTRL_SOLICITED);
	if (mb->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		priv->port_stats.tx_chksum_offload++;
	}

	if (unlikely(priv->validate_loopback)) {
		/* Copy dst mac address to wqe */
		struct ether_header *ethh;
		u64 mac;
		u32 mac_l, mac_h;

		ethh = mtod(mb, struct ether_header *);
		mac = mlx4_en_mac_to_u64(ethh->ether_dhost);
		if (mac) {
			mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
			mac_l = (u32) (mac & 0xffffffff);
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
			tx_desc->ctrl.imm = cpu_to_be32(mac_l);
		}
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		int segsz;

		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			mb->m_pkthdr.tso_segsz << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, mb->m_data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		segsz = mb->m_pkthdr.tso_segsz;
		i = ((mb->m_pkthdr.len - lso_header_size) / segsz) +
		    !!((mb->m_pkthdr.len - lso_header_size) % segsz);
		ring->bytes += mb->m_pkthdr.len + (i - 1) * lso_header_size;
		ring->packets += i;
		mb->m_data += lso_header_size;
		mb->m_len -= lso_header_size;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(mb->m_pkthdr.len,
		    (unsigned int)ETHER_MIN_LEN - ETHER_CRC_LEN);
		ring->packets++;

	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

	/* Save mb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->mb = mb;
	tx_info->nr_txbb = nr_txbb;
	tx_info->nr_segs = nr_segs;
	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	if (!is_inline(mb)) {
		for (i = 0, m = mb; i < nr_segs; i++, m = m->m_next) {
			if (m->m_len == 0) {
				i--;
				continue;
			}
			dma = pci_map_single(mdev->dev->pdev, m->m_data,
					     m->m_len, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(m->m_len);
			data++;
		}
		if (lso_header_size) {
			mb->m_data -= lso_header_size;
			mb->m_len += lso_header_size;
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, mb, real_size, &vlan_tag, tx_ind);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (bounce)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		*(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
		op_own |= htonl((bf_index & 0xffff) << 8);
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset,
		    (unsigned long *) &tx_desc->ctrl, desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}

	return 0;

tx_drop:
	*mbp = NULL;
	m_freem(mb);
	ring->errors++;
	return EINVAL;
}


static int
mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *next;
	int enqueued, err = 0;

	ring = &priv->tx_ring[tx_ind];
	if ((dev->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || priv->port_up == 0) {
		if (m != NULL)
			err = drbr_enqueue(dev, ring->br, m);
		return (err);
	}

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(dev, ring->br);
	} else if (drbr_needs_enqueue(dev, ring->br)) {
		if ((err = drbr_enqueue(dev, ring->br, m)) != 0)
			return (err);
		next = drbr_dequeue(dev, ring->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = mlx4_en_xmit(dev, tx_ind, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(dev, ring->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(dev, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(dev, next);
		if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		next = drbr_dequeue(dev, ring->br);
	}

	if (enqueued > 0)
		ring->watchdog_time = ticks;

	return (err);
}

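/*
 * Deferred transmit task: when mlx4_en_transmit() cannot take the ring's
 * tx_lock it enqueues the mbuf and schedules this handler, which drains the
 * buf ring once the lock becomes available.
 */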
void
mlx4_en_tx_que(void *context, int pending)
{
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct mlx4_en_cq *cq;
	int tx_ind;

	cq = context;
	dev = cq->dev;
	priv = dev->if_softc;
	tx_ind = cq->ring;
	ring = &priv->tx_ring[tx_ind];
	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		mlx4_en_xmit_poll(priv, tx_ind);
		spin_lock(&ring->tx_lock);
		if (!drbr_empty(dev, ring->br))
			mlx4_en_transmit_locked(dev, tx_ind, NULL);
		spin_unlock(&ring->tx_lock);
	}
}

int
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	int i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & (M_FLOWID | M_VLANTAG)) == M_FLOWID)
		i = m->m_pkthdr.flowid % (MLX4_EN_NUM_HASH_RINGS - 1);
	else
		i = mlx4_en_select_queue(dev, m);

	ring = &priv->tx_ring[i];

	if (spin_trylock(&ring->tx_lock)) {
		err = mlx4_en_transmit_locked(dev, i, m);
		spin_unlock(&ring->tx_lock);
		/* Poll CQ here */
		mlx4_en_xmit_poll(priv, i);
	} else {
		err = drbr_enqueue(dev, ring->br, m);
		cq = &priv->tx_cq[i];
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	}

	return (err);
}

/*
 * Flush ring buffers.
 */
void
mlx4_en_qflush(struct ifnet *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring;
	struct mbuf *m;

	for (int i = 0; i < priv->tx_ring_num; i++, ring++) {
		spin_lock(&ring->tx_lock);
		while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
			m_freem(m);
		spin_unlock(&ring->tx_lock);
	}
	if_qflush(dev);
}