/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}
	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		en_err(priv, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
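
	/* Each TX ring is backed by its own send QP: reserve a range of
	 * one QP number, then allocate the QP itself.  The QP number is
	 * later encoded into doorbell_qpn (see mlx4_en_activate_tx_ring)
	 * so doorbell writes land on the right send queue. */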
	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
	if (err) {
		en_err(priv, "Failed reserving qp for tx ring.\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
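
/* Release the resources of one completed descriptor: unmap its DMA
 * mappings (inline descriptors own none), then "stamp" the freed TXBBs
 * every STAMP_STRIDE bytes with the current SW ownership bit, so that
 * ownership tracking stays consistent when the producer index wraps
 * around the ring. */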
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					frag->size, PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = (struct mlx4_wqe_data_seg *)
					(ring->buf + ((void *) data - end));
			}

			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = (struct mlx4_wqe_data_seg *) ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					frag->size, PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}
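
/* TX completion processing: walk from the ring's consumer index up to
 * the last wqe_index reported by hardware, freeing the TXBBs of every
 * completed descriptor on the way.  Called from the IRQ handler, the
 * poll timer and (periodically) from the xmit path, always under
 * ring->comp_lock. */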
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe = cq->buf;
	u16 index;
	u16 new_index;
	u32 txbbs_skipped = 0;
	u32 cq_last_sav;

	/* index always points to the first TXBB of the last polled descriptor */
	index = ring->cons & ring->size_mask;
	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	if (index == new_index)
		return;

	if (!priv->port_up)
		return;

	/*
	 * We use a two-stage loop:
	 * - the first samples the HW-updated CQE
	 * - the second frees TXBBs until the last sample
	 * This lets us amortize CQE cache misses, while still polling the CQ
	 * until it is quiescent.
	 */
	cq_last_sav = mcq->cons_index;
	do {
		do {
			/* Skip over last polled CQE */
			index = (index + ring->last_nr_txbb) & ring->size_mask;
			txbbs_skipped += ring->last_nr_txbb;

			/* Poll next CQE */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
						priv, ring, index,
						!!((ring->cons + txbbs_skipped) &
						   ring->size));
			++mcq->cons_index;

		} while (index != new_index);

		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	} while (index != new_index);
	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
			 (u32) (mcq->cons_index - cq_last_sav));

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wake up the Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
	unsigned long flags;

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock_irqrestore(&ring->comp_lock, flags);
		}
}

static void *get_frag_ptr(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	struct page *page = frag->page;
	void *ptr;

	ptr = page_address(page);
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}
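
/* An skb is sent inline (copied straight into the WQE) when it is no
 * longer than inline_thold and is either fully linear or made of a
 * single page fragment whose contents are directly addressable;
 * GSO skbs are never inlined. */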
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = get_frag_ptr(skb);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}

static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}

static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_shinfo(skb)->frags[0].size);

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
				       fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
							 skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
				       fragptr, skb_shinfo(skb)->frags[0].size);
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
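
/* Queue selection: with per-priority flow control, VLAN-tagged traffic
 * is steered by its 3-bit priority field (the top bits of the VLAN tag)
 * to one of the per-priority rings that follow the first
 * MLX4_EN_NUM_TX_RINGS rings; everything else uses the stack's default
 * hash. */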
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}

	return skb_tx_hash(dev, skb);
}
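
/* Hard-start-xmit: builds one work request per skb.  The control
 * segment is written first; data segments are filled last-fragment-
 * first; the opcode/ownership field is set only after a memory barrier
 * makes the rest of the descriptor visible, and a doorbell write to
 * the UAR page then notifies hardware. */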
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;

	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (priv->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* every full Tx ring stops the queue */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare ctrl segment apart from opcode+ownership, which depends on
	 * whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
						MLX4_WQE_CTRL_SOLICITED);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		priv->port_stats.tx_chksum_offload++;
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;
	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(frag->size);
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run destructor before passing skb to HW */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	/* Ensure new descriptor hits memory
	 * before setting ownership of this descriptor to HW */
	wmb();
	tx_desc->ctrl.owner_opcode = op_own;

	/* Ring doorbell! */
	wmb();
	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, tx_ind);

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}