1/******************************************************************************
2
3 Copyright (c) 2001-2014, Intel Corporation
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11

--- 13 unchanged lines hidden ---

25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 280182 2015-03-17 18:32:28Z jfv $*/
33/*$FreeBSD: stable/10/sys/dev/ixgbe/ix_txrx.c 283620 2015-05-27 17:44:11Z erj $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
39#endif
40
41#include "ixgbe.h"
42
44#ifdef RSS
45#include <netinet/in_rss.h>
43#ifdef DEV_NETMAP
44#include <net/netmap.h>
45#include <sys/selinfo.h>
46#include <dev/netmap/netmap_kern.h>
47
48extern int ix_crcstrip;
49#endif
50
51/*
52** HW RSC control:
53** this feature only works with
54** IPv4, and only on 82599 and later.
55** Also this will cause IP forwarding to
56** fail and that can't be controlled by

--- 131 unchanged lines hidden ---

188*/
189int
190ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
191{
192 struct adapter *adapter = ifp->if_softc;
193 struct ix_queue *que;
194 struct tx_ring *txr;
195 int i, err = 0;
193#ifdef RSS
194 uint32_t bucket_id;
195#endif
196
197 /*
198 * When doing RSS, map it to the same outbound queue
199 * as the incoming flow would be mapped to.
200 *
201	 * If everything is set up correctly, it should be the
202	 * same bucket as the one the current CPU maps to.
203 */
204 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
205#ifdef RSS
206 if (rss_hash2bucket(m->m_pkthdr.flowid,
207 M_HASHTYPE_GET(m), &bucket_id) == 0)
208 /* TODO: spit out something if bucket_id > num_queues? */
209 i = bucket_id % adapter->num_queues;
210 else
211#endif
212 i = m->m_pkthdr.flowid % adapter->num_queues;
213 } else
204 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
205 i = m->m_pkthdr.flowid % adapter->num_queues;
206 else
207 i = curcpu % adapter->num_queues;
208
209 /* Check for a hung queue and pick alternative */
210 if (((1 << i) & adapter->active_queues) == 0)
211		i = ffsl(adapter->active_queues) - 1; /* ffsl() is 1-based */
212
213 txr = &adapter->tx_rings[i];
214 que = &adapter->queues[i];

--- 40 unchanged lines hidden ---
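
To make the queue-selection policy in the hunk above concrete, here is a
minimal sketch of the same logic lifted out of the driver. The struct and
function names below (queue_pick_state, pick_tx_queue) are hypothetical
stand-ins, not driver API; it assumes FreeBSD's 1-based ffsl() from
<strings.h>.

	#include <strings.h>			/* ffsl() */

	/* Hypothetical stand-in for the driver state this logic consults. */
	struct queue_pick_state {
		int	num_queues;
		long	active_queues;		/* bit n set => queue n is alive */
	};

	static int
	pick_tx_queue(struct queue_pick_state *s, int have_hash,
	    unsigned int flowid, int cpu)
	{
		int i;

		/* Hash-tagged traffic follows its flow; the rest follows the CPU. */
		if (have_hash)
			i = flowid % s->num_queues;
		else
			i = cpu % s->num_queues;

		/*
		 * Hung queue? Fall back to the first active one. ffsl() is
		 * 1-based (0 means no bits set), hence the -1 and the guard.
		 */
		if (((1L << i) & s->active_queues) == 0 && s->active_queues != 0)
			i = ffsl(s->active_queues) - 1;

		return (i);
	}
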

255 break;
256 }
257#if __FreeBSD_version >= 901504
258 drbr_advance(ifp, txr->br);
259#endif
260 enqueued++;
261#if 0 // this is VF-only
262#if __FreeBSD_version >= 1100036
270 if (next->m_flags & M_MCAST)
263 /*
264 * Since we're looking at the tx ring, we can check
265	 * to see if we're a VF by examining our tail register
266 * address.
267 */
268 if (txr->tail < IXGBE_TDT(0) && next->m_flags & M_MCAST)
269 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
270#endif
271#endif
272 /* Send a copy of the frame to the BPF listener */
273 ETHER_BPF_MTAP(ifp, next);
274 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
275 break;
276#if __FreeBSD_version < 901504

--- 19 unchanged lines hidden ---
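
The __FreeBSD_version 901504 conditionals in the hunk above track a buf_ring
API change: newer kernels peek at the head of the ring and only advance past
a frame once it has really been queued to hardware, instead of dequeueing and
re-enqueueing on failure. A condensed sketch of the newer pattern, reusing
the names from the surrounding code (txr, ifp, ixgbe_xmit), not a verbatim
copy of the hidden lines:

	/* Peek/advance/putback transmit loop (__FreeBSD_version >= 901504). */
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		error = ixgbe_xmit(txr, &next);
		if (error != 0) {
			if (next == NULL)
				drbr_advance(ifp, txr->br);	/* mbuf was consumed */
			else
				drbr_putback(ifp, txr->br, next); /* retry later */
			break;
		}
		drbr_advance(ifp, txr->br);		/* committed to hardware */
		enqueued++;
		ETHER_BPF_MTAP(ifp, next);		/* copy to BPF listeners */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}
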

296
297 IXGBE_TX_LOCK(txr);
298 if (!drbr_empty(ifp, txr->br))
299 ixgbe_mq_start_locked(ifp, txr);
300 IXGBE_TX_UNLOCK(txr);
301}
302
303/*
306** Flush all ring buffers
307*/
304 * Flush all ring buffers
305 */
306void
307ixgbe_qflush(struct ifnet *ifp)
308{
309 struct adapter *adapter = ifp->if_softc;
310 struct tx_ring *txr = adapter->tx_rings;
311 struct mbuf *m;
312
313 for (int i = 0; i < adapter->num_queues; i++, txr++) {

--- 57 unchanged lines hidden ---

371 if (__predict_false(error)) {
372 struct mbuf *m;
373
374 switch (error) {
375 case EFBIG:
376 /* Try it again? - one try */
377 if (remap == TRUE) {
378 remap = FALSE;
379 /*
380 * XXX: m_defrag will choke on
381 * non-MCLBYTES-sized clusters
382 */
383 m = m_defrag(*m_headp, M_NOWAIT);
384 if (m == NULL) {
385 adapter->mbuf_defrag_failed++;
386 m_freem(*m_headp);
387 *m_headp = NULL;
388 return (ENOBUFS);
389 }
390 *m_headp = m;

--- 15 unchanged lines hidden ---
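
The EFBIG branch above is the standard one-shot defragment-and-retry idiom:
bus_dmamap_load_mbuf_sg() reports EFBIG when the chain has more segments than
the DMA tag allows, and m_defrag() coalesces it into as few clusters as
possible for a single retry. A self-contained restatement (kernel context
assumed; load_mbuf_for_dma and the retry label are illustrative, not driver
API):

	/* Load an mbuf chain for DMA, defragmenting at most once on EFBIG. */
	static int
	load_mbuf_for_dma(bus_dma_tag_t tag, bus_dmamap_t map,
	    struct mbuf **m_headp, bus_dma_segment_t *segs, int *nsegs)
	{
		struct mbuf *m;
		int error, tries = 0;

	retry:
		error = bus_dmamap_load_mbuf_sg(tag, map, *m_headp, segs, nsegs,
		    BUS_DMA_NOWAIT);
		if (error == EFBIG && tries++ == 0) {
			/* Too many segments: coalesce the chain, then retry. */
			m = m_defrag(*m_headp, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_headp);
				*m_headp = NULL;
				return (ENOBUFS);	/* couldn't defrag: drop */
			}
			*m_headp = m;
			goto retry;
		}
		return (error);
	}
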

406 if (nsegs > txr->tx_avail - 2) {
407 txr->no_desc_avail++;
408 bus_dmamap_unload(txr->txtag, map);
409 return (ENOBUFS);
410 }
411 m_head = *m_headp;
412
413 /*
412 ** Set up the appropriate offload context
413 ** this will consume the first descriptor
414 */
414 * Set up the appropriate offload context
415 * this will consume the first descriptor
416 */
417 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
418 if (__predict_false(error)) {
419 if (error == ENOBUFS)
420 *m_headp = NULL;
421 return (error);
422 }
423
424#ifdef IXGBE_FDIR
425 /* Do the flow director magic */
426 if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
427 ++txr->atr_count;
428 if (txr->atr_count >= atr_sample_rate) {
429 ixgbe_atr(txr, m_head);
430 txr->atr_count = 0;
431 }
432 }
433#endif
434
433 olinfo_status |= IXGBE_ADVTXD_CC;
435 i = txr->next_avail_desc;
436 for (j = 0; j < nsegs; j++) {
437 bus_size_t seglen;
438 bus_addr_t segaddr;
439
440 txbuf = &txr->tx_buffers[i];
441 txd = &txr->tx_base[i];
442 seglen = segs[j].ds_len;

--- 10 unchanged lines hidden ---

453
454 txd->read.cmd_type_len |=
455 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
456 txr->tx_avail -= nsegs;
457 txr->next_avail_desc = i;
458
459 txbuf->m_head = m_head;
460 /*
460 ** Here we swap the map so the last descriptor,
461 ** which gets the completion interrupt has the
462 ** real map, and the first descriptor gets the
463 ** unused map from this descriptor.
464 */
461 * Here we swap the map so the last descriptor,
462	 * which gets the completion interrupt, has the
463 * real map, and the first descriptor gets the
464 * unused map from this descriptor.
465 */
466 txr->tx_buffers[first].map = txbuf->map;
467 txbuf->map = map;
468 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
469
470 /* Set the EOP descriptor that will be marked done */
471 txbuf = &txr->tx_buffers[first];
472 txbuf->eop = txd;
473

--- 6 unchanged lines hidden (view full) ---

480 ++txr->total_packets;
481 IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
482
483 /* Mark queue as having work */
484 if (txr->busy == 0)
485 txr->busy = 1;
486
487 return (0);
487
488}
489
490
491/*********************************************************************
492 *
493 * Allocate memory for tx_buffer structures. The tx_buffer stores all
494 * the information needed to transmit a packet on the wire. This is
495 * called only once at attach, setup is done every reset.

--- 222 unchanged lines hidden ---

718 * Advanced Context Descriptor setup for VLAN, CSUM or TSO
719 *
720 **********************************************************************/
721
722static int
723ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
724 u32 *cmd_type_len, u32 *olinfo_status)
725{
726 struct adapter *adapter = txr->adapter;
727 struct ixgbe_adv_tx_context_desc *TXD;
728 struct ether_vlan_header *eh;
729 struct ip *ip;
730 struct ip6_hdr *ip6;
731 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
732 int ehdrlen, ip_hlen = 0;
733 u16 etype;
734 u8 ipproto = 0;

--- 18 unchanged lines hidden ---

753 ** In advanced descriptors the vlan tag must
754 ** be placed into the context descriptor. Hence
755 ** we need to make one even if not doing offloads.
756 */
757 if (mp->m_flags & M_VLANTAG) {
758 vtag = htole16(mp->m_pkthdr.ether_vtag);
759 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
760 }
761 else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
762 return (0);
763
764 /*
765 * Determine where frame payload starts.
766 * Jump over vlan headers if already present,
767 * helpful for QinQ too.
768 */
769 eh = mtod(mp, struct ether_vlan_header *);
770 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {

--- 267 unchanged lines hidden (view full) ---

1038 buf->m_head->m_pkthdr.len;
1039 bus_dmamap_sync(txr->txtag,
1040 buf->map,
1041 BUS_DMASYNC_POSTWRITE);
1042 bus_dmamap_unload(txr->txtag,
1043 buf->map);
1044 m_freem(buf->m_head);
1045 buf->m_head = NULL;
1043 buf->map = NULL;
1046 }
1047 buf->eop = NULL;
1048 ++txr->tx_avail;
1049
1050 /* We clean the range if multi segment */
1051 while (txd != eop) {
1052 ++txd;
1053 ++buf;

--- 9 unchanged lines hidden (view full) ---

1063 buf->m_head->m_pkthdr.len;
1064 bus_dmamap_sync(txr->txtag,
1065 buf->map,
1066 BUS_DMASYNC_POSTWRITE);
1067 bus_dmamap_unload(txr->txtag,
1068 buf->map);
1069 m_freem(buf->m_head);
1070 buf->m_head = NULL;
1069 buf->map = NULL;
1071 }
1072 ++txr->tx_avail;
1073 buf->eop = NULL;
1074
1075 }
1076 ++txr->packets;
1077 ++processed;
1078

--- 227 unchanged lines hidden ---

1306
1307 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1308
1309 /* If we're dealing with an mbuf that was copied rather
1310 * than replaced, there's no need to go through busdma.
1311 */
1312 if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1313 /* Get the memory mapping */
1314 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
1315 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
1316 rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT);
1317 if (error != 0) {
1318 printf("Refresh mbufs: payload dmamap load"
1319 " failure - %d\n", error);
1320 m_free(mp);
1321 rxbuf->buf = NULL;
1322 goto update;

--- 60 unchanged lines hidden ---

1383 NULL, /* lockfuncarg */
1384 &rxr->ptag))) {
1385 device_printf(dev, "Unable to create RX DMA tag\n");
1386 goto fail;
1387 }
1388
1389 for (i = 0; i < rxr->num_desc; i++, rxbuf++) {
1390 rxbuf = &rxr->rx_buffers[i];
1389 error = bus_dmamap_create(rxr->ptag,
1390 BUS_DMA_NOWAIT, &rxbuf->pmap);
1391 error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1392 if (error) {
1393 device_printf(dev, "Unable to create RX dma map\n");
1394 goto fail;
1395 }
1396 }
1397
1398 return (0);
1399

--- 302 unchanged lines hidden ---
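
The one-line change in the hunk above is easy to read past: BUS_DMA_NOWAIT is
a flag for the map load functions, not for bus_dmamap_create(), whose flag
space is for creation options such as BUS_DMA_COHERENT. The old call was
therefore passing a meaningless bit, and the revision passes 0. The corrected
pairing, with names from the surrounding code:

	/* Creation: 0 here; creation flags are things like BUS_DMA_COHERENT. */
	error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);

	/* Load: this is where BUS_DMA_NOWAIT belongs. */
	error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp,
	    seg, &nsegs, BUS_DMA_NOWAIT);
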

1702 rbuf->fmp->m_flags |= M_PKTHDR;
1703 m_freem(rbuf->fmp);
1704 rbuf->fmp = NULL;
1705 rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1706 } else if (rbuf->buf) {
1707 m_free(rbuf->buf);
1708 rbuf->buf = NULL;
1709 }
1710 bus_dmamap_unload(rxr->ptag, rbuf->pmap);
1711
1712 rbuf->flags = 0;
1713
1714 return;
1715}
1716
1717
1718/*********************************************************************
1719 *
1720 * This routine executes in interrupt context. It replenishes
1721 * the mbufs in the descriptor ring and sends data which has
1722 * been DMA'ed into host memory up to the upper layer.
1723 *
1722 * We loop at most count times if count is > 0, or until done if
1723 * count < 0.
1724 *
1724 * Return TRUE for more work, FALSE for all clean.
1725 *********************************************************************/
1726bool
1727ixgbe_rxeof(struct ix_queue *que)
1728{
1729 struct adapter *adapter = que->adapter;
1730 struct rx_ring *rxr = que->rxr;
1731 struct ifnet *ifp = adapter->ifp;

--- 46 unchanged lines hidden (view full) ---

1778
1779 len = le16toh(cur->wb.upper.length);
1780 ptype = le32toh(cur->wb.lower.lo_dword.data) &
1781 IXGBE_RXDADV_PKTTYPE_MASK;
1782 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1783
1784 /* Make sure bad packets are discarded */
1785 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1787#if 0 // VF-only
1786#if __FreeBSD_version >= 1100036
1789 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1787 if (IXGBE_IS_VF(adapter))
1788 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1789#endif
1791#endif
1790 rxr->rx_discarded++;
1791 ixgbe_rx_discard(rxr, i);
1792 goto next_desc;
1793 }
1794
1795 /*
1796 ** On 82599 which supports a hardware
1797 ** LRO (called HW RSC), packets need

--- 90 unchanged lines hidden ---

1888 vtag = le16toh(cur->wb.upper.vlan);
1889 if (vtag) {
1890 sendmp->m_pkthdr.ether_vtag = vtag;
1891 sendmp->m_flags |= M_VLANTAG;
1892 }
1893 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1894 ixgbe_rx_checksum(staterr, sendmp, ptype);
1895#if __FreeBSD_version >= 800000
1898#ifdef RSS
1899 sendmp->m_pkthdr.flowid =
1900 le32toh(cur->wb.lower.hi_dword.rss);
1901 switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1902 case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1903 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV4);
1904 break;
1905 case IXGBE_RXDADV_RSSTYPE_IPV4:
1906 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV4);
1907 break;
1908 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1909 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6);
1910 break;
1911 case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1912 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6_EX);
1913 break;
1914 case IXGBE_RXDADV_RSSTYPE_IPV6:
1915 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6);
1916 break;
1917 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1918 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6_EX);
1919 break;
1920 case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
1921 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV4);
1922 break;
1923 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
1924 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6);
1925 break;
1926 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
1927 M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6_EX);
1928 break;
1929 default:
1930 M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
1931 }
1932#else /* RSS */
1896 sendmp->m_pkthdr.flowid = que->msix;
1934 M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
1935#endif /* RSS */
1897#endif /* FreeBSD_version */
1898 }
1899next_desc:
1900 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1901 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1902
1903 /* Advance our pointers to the next descriptor. */
1904 if (++i == rxr->num_desc)

--- 316 unchanged lines hidden ---
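
A closing cross-reference: the M_HASHTYPE_SET() calls in the RSS switch above
are what make the transmit-side mapping at the top of this file work.
ixgbe_mq_start() checks M_HASHTYPE_GET(m) != M_HASHTYPE_NONE and, when a hash
is present, steers the packet with m->m_pkthdr.flowid % adapter->num_queues,
so flows tagged here on receive keep landing on the same queue pair on
transmit. Without RSS compiled in, the queue's MSI-X index serves as an
opaque but stable flowid, which preserves the same affinity.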