mlx4_en_tx.c revision 297966
/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104,	/* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
	MIN_PKT_LEN = 17,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, uint, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	uint32_t x;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,					/* any alignment */
	    0,					/* no boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MLX4_EN_TX_MAX_PAYLOAD_SIZE,	/* maxsize */
	    MLX4_EN_TX_MAX_MBUF_FRAGS,		/* nsegments */
	    MLX4_EN_TX_MAX_MBUF_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &ring->dma_tag)))
		goto done;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
	    M_WAITOK, &ring->tx_lock.m);
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		err = -ENOMEM;
		goto err_free_dma_tag;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		ring->tx_info = kzalloc(tmp, GFP_KERNEL);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	/* Create DMA descriptor MAPs */
	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->tx_info[x].dma_map);
		if (err != 0) {
			while (x--) {
				bus_dmamap_destroy(ring->dma_tag,
				    ring->tx_info[x].dma_map);
			}
			goto err_info;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	    ring->tx_info, tmp);

	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
	    2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_dma_map;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	    "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	    ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
	    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
err_free_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
done:
	kfree(ring);
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;
	uint32_t x;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	buf_ring_free(ring->br, M_DEVBUF);
	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	for (x = 0; x != ring->size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
	vfree(ring->tx_info);
	mtx_destroy(&ring->tx_lock.m);
	mtx_destroy(&ring->comp_lock.m);
	bus_dma_tag_destroy(ring->dma_tag);
	kfree(ring);
	*pring = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
	    ring->cqn, user_prio, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
	    &ring->qp, &ring->qp_state);
	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_lso_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);

	/* copy data into place */
	m_copydata(mb, 0, len, inl + 4);
	dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	return (dseg);
}

static void
mlx4_en_store_inline_lso_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
}

static void
mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
	    (ring->buf + (index * TXBB_SIZE));
	volatile __be32 *ptr = (__be32 *)tx_desc;
	const __be32 stamp = cpu_to_be32(STAMP_VAL |
	    ((u32)owner << STAMP_SHIFT));
	u32 i;

	/* Stamp the freed descriptor */
	for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
		*ptr = stamp;
		ptr += STAMP_DWORDS;
	}
}

static u32
mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index)
{
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb;

	tx_info = &ring->tx_info[index];
	mb = tx_info->mb;

	if (mb == NULL)
		goto done;

	bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);

	m_freem(mb);
done:
	return (tx_info->nr_txbb);
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	    ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
		    ring->cons & ring->size_mask);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

static bool
mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
{
	int wqs;
	wqs = ring->size - (ring->prod - ring->cons);
	return (wqs < (HEADROOM + (2 * MLX4_EN_TX_WQE_MAX_WQEBBS)));
}
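
/*
 * Reclaim completed transmit descriptors: walk the completion queue,
 * free the mbuf attached to each completed WQE and stamp the reclaimed
 * TXBBs (mlx4_en_stamp_wqe) before they can be reused.  The CQ
 * consumer index is advanced before the ring consumer to avoid CQ
 * overflow, and a stopped send queue is re-opened once the ring has
 * room again.
 */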
static int mlx4_en_process_tx_cq(struct net_device *dev,
				 struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	int factor = priv->cqe_factor;

	if (!priv->port_up)
		return 0;

	index = cons_index & size_mask;
	cqe = &buf[(index << factor) + factor];
	ring_index = ring->cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
	    cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		    MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom: 0x%x syndrom: 0x%x\n",
			    ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			    ((struct mlx4_err_cqe *)cqe)->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
			    priv, ring, ring_index);
			mlx4_en_stamp_wqe(priv, ring, stamp_index,
			    !!((ring->cons + txbbs_stamp) &
			    ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[(index << factor) + factor];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if it was stopped and ring is not full */
	if (unlikely(ring->blocked) && !mlx4_en_tx_ring_is_full(ring)) {
		ring->blocked = 0;
		if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
			atomic_clear_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
		ring->wake_queue++;
		priv->port_stats.wake_queue++;
	}
	return (0);
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];

	if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (priv->port_up == 0)
		return;
	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];

	if (priv->port_up == 0)
		return;

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock(&ring->comp_lock);
		}
}

static u16
mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
{
	u16 retval;

	/* only copy from first fragment, if possible */
	retval = MIN(ring->inline_thold, mb->m_len);

	/* check for too little data */
	if (unlikely(retval < MIN_PKT_LEN))
		retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
	return (retval);
}

static int
mlx4_en_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		m_copydata(mb, 0, len, inl + 4);
		memset(inl + 4 + len, 0, MIN_PKT_LEN - len);
		dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT);
	} else if (len <= spc) {
		m_copydata(mb, 0, len, inl + 4);
		dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	} else {
		m_copydata(mb, 0, spc, inl + 4);
		m_copydata(mb, spc, len - spc, inl + 8 + spc);
		dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT);
	}
	return (dseg);
}

static void
mlx4_en_store_inline_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | MIN_PKT_LEN);
	} else if (len <= spc) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | len);
	} else {
		*(volatile uint32_t *)(inl + 4 + spc) =
		    SET_BYTE_COUNT((1 << 31) | (len - spc));
		wmb();
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | spc);
	}
}

static uint32_t hashrandom;
static void hashrandom_init(void *arg)
{
	/*
	 * It is assumed that the random subsystem has been
	 * initialized when this function is called:
	 */
	hashrandom = m_ether_tcpip_hash_init();
}
SYSINIT(hashrandom_init, SI_SUB_RANDOM, SI_ORDER_ANY, &hashrandom_init, NULL);

u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u32 rings_p_up = priv->num_tx_rings_p_up;
	u32 up = 0;
	u32 queue_index;

#if (MLX4_EN_NUM_UP > 1)
	/* Obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		u32 vlan_tag = mb->m_pkthdr.ether_vtag;
		up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
	}
#endif
	queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom);

	return ((queue_index % rings_p_up) + (up * rings_p_up));
}

static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8);
}

static u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
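
/*
 * Build and post one send WQE for the given mbuf chain.  The WQE is
 * made up of a control segment, an optional LSO segment carrying the
 * inlined packet headers, inline packet data, and one scatter/gather
 * data segment per DMA-loaded mbuf fragment.  Data segments are
 * written in reverse order, and the end of the request is padded with
 * zero-length segments when the next request would otherwise wrap the
 * ring.  Ownership is handed to hardware either through a BlueFlame
 * copy (small WQEs without VLAN insertion) or by writing the send
 * doorbell register.
 */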
static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp)
{
	enum {
		DS_FACT = TXBB_SIZE / DS_SIZE_ALIGNMENT,
		CTRL_FLAGS = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
		    MLX4_WQE_CTRL_SOLICITED),
	};
	bus_dma_segment_t segs[MLX4_EN_TX_MAX_MBUF_FRAGS];
	volatile struct mlx4_wqe_data_seg *dseg;
	volatile struct mlx4_wqe_data_seg *dseg_inline;
	volatile struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
	struct ifnet *ifp = priv->dev;
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb = *mbp;
	struct mbuf *m;
	__be32 owner_bit;
	int nr_segs;
	int pad;
	int err;
	u32 bf_size;
	u32 bf_prod;
	u32 opcode;
	u16 index;
	u16 ds_cnt;
	u16 ihs;

	if (unlikely(!priv->port_up)) {
		err = EINVAL;
		goto tx_drop;
	}

	/* check if TX ring is full */
	if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
		/* every full native Tx ring stops queue */
		if (ring->blocked == 0)
			atomic_add_int(&priv->blocked, 1);
		/* Set HW-queue-is-full flag */
		atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		priv->port_stats.queue_stopped++;
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;
		ring->queue_stopped++;

		/* Use interrupts to find out when queue opened */
		mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]);
		return (ENOBUFS);
	}

	/* sanity check we are not wrapping around */
	KASSERT(((~ring->prod) & ring->size_mask) >=
	    (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
	    (u32) (ring->prod - ring->cons - 1));

	/* Track current mbuf packet header length */
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

	/* Grab an index and try to transmit packet */
	owner_bit = (ring->prod & ring->size) ?
	    cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0;
	index = ring->prod & ring->size_mask;
	tx_desc = (volatile struct mlx4_en_tx_desc *)
	    (ring->buf + index * TXBB_SIZE);
	tx_info = &ring->tx_info[index];
	dseg = &tx_desc->data;

	/* send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	/* get default flags */
	tx_desc->ctrl.srcrb_flags = CTRL_FLAGS;

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);

	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);

	/* do statistics */
	if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) {
		priv->port_stats.tx_chksum_offload++;
		ring->tx_csum++;
	}

	/* check for VLAN tag */
	if (mb->m_flags & M_VLANTAG) {
		tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag);
		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN;
	} else {
		tx_desc->ctrl.vlan_tag = 0;
		tx_desc->ctrl.ins_vlan = 0;
	}

	/* clear immediate field */
	tx_desc->ctrl.imm = 0;

	/* Handle LSO (TSO) packets */
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		opcode = cpu_to_be32(MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR) |
		    owner_bit;
		ihs = mlx4_en_get_header_size(mb);
		if (unlikely(ihs > MAX_INLINE)) {
			ring->oversized_packets++;
			err = EINVAL;
			goto tx_drop;
		}
		tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs);
		payload_len = mb->m_pkthdr.len - ihs;
		if (unlikely(payload_len == 0))
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		ring->bytes += payload_len + (num_pkts * ihs);
		ring->packets += num_pkts;
		priv->port_stats.tso_packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_lso_data(dseg,
		    mb, ihs, owner_bit);
	} else {
		opcode = cpu_to_be32(MLX4_OPCODE_SEND) |
		    owner_bit;
		ihs = mlx4_en_get_inline_hdr_size(ring, mb);
		ring->bytes += max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
		ring->packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_data(dseg,
		    mb, ihs, owner_bit);
	}
	m_adj(mb, ihs);

	/* trim off empty mbufs */
	while (mb->m_len == 0) {
		mb = m_free(mb);
		/* check if all data has been inlined */
		if (mb == NULL) {
			nr_segs = 0;
			goto skip_dma;
		}
	}

	err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
	    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	if (unlikely(err == EFBIG)) {
		/* Too many mbuf fragments */
		m = m_defrag(mb, M_NOWAIT);
		if (m == NULL) {
			ring->oversized_packets++;
			goto tx_drop;
		}
		mb = m;
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
		    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	}
	/* catch errors */
	if (unlikely(err != 0)) {
		ring->oversized_packets++;
		goto tx_drop;
	}
	/* make sure all mbuf data is written to RAM */
	bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
	    BUS_DMASYNC_PREWRITE);

skip_dma:
	/* compute number of DS needed */
	ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;

	/*
	 * Check if the next request can wrap around and fill the end
	 * of the current request with zero immediate data:
	 */
	pad = DIV_ROUND_UP(ds_cnt, DS_FACT);
	pad = (~(ring->prod + pad)) & ring->size_mask;

	if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) {
		/*
		 * Compute the least number of DS blocks we need to
		 * pad in order to achieve a TX ring wraparound:
		 */
		pad = (DS_FACT * (pad + 1));
	} else {
		/*
		 * The hardware will automatically jump to the next
		 * TXBB. No need for padding.
		 */
		pad = 0;
	}

	/* compute total number of DS blocks */
	ds_cnt += pad;

	/*
	 * When modifying this code, please ensure that the following
	 * computation is always less than or equal to 0x3F:
	 *
	 * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) +
	 * (MLX4_EN_TX_WQE_MAX_WQEBBS * DS_FACT)
	 *
	 * Else the "ds_cnt" variable can become too big.
	 */
	tx_desc->ctrl.fence_size = (ds_cnt & 0x3f);

	/* store pointer to mbuf */
	tx_info->mb = mb;
	tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT);
	bf_size = ds_cnt * DS_SIZE_ALIGNMENT;
	bf_prod = ring->prod;

	/* compute end of "dseg" array */
	dseg += nr_segs + pad;

	/* pad using zero immediate dseg */
	while (pad--) {
		dseg--;
		dseg->addr = 0;
		dseg->lkey = 0;
		wmb();
		dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
	}

	/* fill segment list */
	while (nr_segs--) {
		if (unlikely(segs[nr_segs].ds_len == 0)) {
			dseg--;
			dseg->addr = 0;
			dseg->lkey = 0;
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
		} else {
			dseg--;
			dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr);
			dseg->lkey = cpu_to_be32(priv->mdev->mr.key);
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len);
		}
	}

	wmb();

	/* write owner bits in reverse order */
	if ((opcode & cpu_to_be32(0x1F)) == cpu_to_be32(MLX4_OPCODE_LSO))
		mlx4_en_store_inline_lso_header(dseg_inline, ihs, owner_bit);
	else
		mlx4_en_store_inline_header(dseg_inline, ihs, owner_bit);

	if (unlikely(priv->validate_loopback)) {
		/* Copy dst mac address to wqe */
		struct ether_header *ethh;
		u64 mac;
		u32 mac_l, mac_h;

		ethh = mtod(mb, struct ether_header *);
		mac = mlx4_en_mac_to_u64(ethh->ether_dhost);
		if (mac) {
			mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
			mac_l = (u32) (mac & 0xffffffff);
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
			tx_desc->ctrl.imm = cpu_to_be32(mac_l);
		}
	}

	/* update producer counter */
	ring->prod += tx_info->nr_txbb;

	if (ring->bf_enabled && bf_size <= MAX_BF &&
	    (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_VLAN)) {

		/* store doorbell number */
		*(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);

		/* or in producer number for this WQE */
		opcode |= cpu_to_be32((bf_prod & 0xffff) << 8);

		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
		    (volatile unsigned long *) &tx_desc->ctrl, bf_size);
		wmb();
		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		writel(cpu_to_be32(ring->doorbell_qpn),
		    ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
	}

	return (0);
tx_drop:
	*mbp = NULL;
	m_freem(mb);
	return (err);
}
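
/*
 * Drain the per-ring buf_ring (drbr) while holding the ring TX lock:
 * an incoming mbuf is first enqueued, then packets are peeked, handed
 * to mlx4_en_xmit() and either advanced past or put back on failure.
 * mlx4_en_transmit() picks the ring from the mbuf flowid (or a locally
 * computed hash) and defers to the CQ taskqueue when the ring lock is
 * contended.
 */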
static int
mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *next;
	int enqueued, err = 0;

	ring = priv->tx_ring[tx_ind];
	if ((dev->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || priv->port_up == 0) {
		if (m != NULL)
			err = drbr_enqueue(dev, ring->br, m);
		return (err);
	}

	enqueued = 0;
	if (m != NULL)
		/*
		 * If we can't insert mbuf into drbr, try to xmit anyway.
		 * We keep the error we got so we could return that after xmit.
		 */
		err = drbr_enqueue(dev, ring->br, m);

	/* Process the queue */
	while ((next = drbr_peek(dev, ring->br)) != NULL) {
		if (mlx4_en_xmit(priv, tx_ind, &next) != 0) {
			if (next == NULL) {
				drbr_advance(dev, ring->br);
			} else {
				drbr_putback(dev, ring->br, next);
			}
			break;
		}
		drbr_advance(dev, ring->br);
		enqueued++;
		if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (enqueued > 0)
		ring->watchdog_time = ticks;

	return (err);
}

void
mlx4_en_tx_que(void *context, int pending)
{
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct mlx4_en_cq *cq;
	int tx_ind;
	cq = context;
	dev = cq->dev;
	priv = dev->if_softc;
	tx_ind = cq->ring;
	ring = priv->tx_ring[tx_ind];

	if (priv->port_up != 0 &&
	    (dev->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		mlx4_en_xmit_poll(priv, tx_ind);
		spin_lock(&ring->tx_lock);
		if (!drbr_empty(dev, ring->br))
			mlx4_en_transmit_locked(dev, tx_ind, NULL);
		spin_unlock(&ring->tx_lock);
	}
}

int
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	int i, err = 0;

	if (priv->port_up == 0) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Compute which queue to use */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		i = (m->m_pkthdr.flowid % 128) % priv->tx_ring_num;
	} else {
		i = mlx4_en_select_queue(dev, m);
	}

	ring = priv->tx_ring[i];
	if (spin_trylock(&ring->tx_lock)) {
		err = mlx4_en_transmit_locked(dev, i, m);
		spin_unlock(&ring->tx_lock);
		/* Poll CQ here */
		mlx4_en_xmit_poll(priv, i);
	} else {
		err = drbr_enqueue(dev, ring->br, m);
		cq = priv->tx_cq[i];
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	}

	return (err);
}

/*
 * Flush ring buffers.
 */
void
mlx4_en_qflush(struct ifnet *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *m;

	if (priv->port_up == 0)
		return;

	for (int i = 0; i < priv->tx_ring_num; i++) {
		ring = priv->tx_ring[i];
		spin_lock(&ring->tx_lock);
		while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
			m_freem(m);
		spin_unlock(&ring->tx_lock);
	}
	if_qflush(dev);
}