3c3
< Copyright (c) 2001-2015, Intel Corporation
---
> Copyright (c) 2001-2017, Intel Corporation
5,6c5,6
<
< Redistribution and use in source and binary forms, with or without
---
>
> Redistribution and use in source and binary forms, with or without
8,9c8,9
<
< 1. Redistributions of source code must retain the above copyright notice,
---
>
> 1. Redistributions of source code must retain the above copyright notice,
11,13c11,13
<
< 2. Redistributions in binary form must reproduce the above copyright
< notice, this list of conditions and the following disclaimer in the
---
>
> 2. Redistributions in binary form must reproduce the above copyright
> notice, this list of conditions and the following disclaimer in the
15,17c15,17
<
< 3. Neither the name of the Intel Corporation nor the names of its
< contributors may be used to endorse or promote products derived from
---
>
> 3. Neither the name of the Intel Corporation nor the names of its
> contributors may be used to endorse or promote products derived from
19c19
<
---
>
21,28c21,28
< AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
< IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
< ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
< LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
< CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
< SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
< INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
< CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
---
> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33c33
< /*$FreeBSD: stable/11/sys/dev/ixgbe/ix_txrx.c 301538 2016-06-07 04:51:50Z sephe $*/
---
> /*$FreeBSD: stable/11/sys/dev/ixgbe/ix_txrx.c 320897 2017-07-11 21:25:07Z erj $*/
44,56d43
< #ifdef RSS
< #include <net/rss_config.h>
< #include <netinet/in_rss.h>
< #endif
<
< #ifdef DEV_NETMAP
< #include <net/netmap.h>
< #include <sys/selinfo.h>
< #include <dev/netmap/netmap_kern.h>
<
< extern int ix_crcstrip;
< #endif
<
58,68c45,55
< ** HW RSC control:
< ** this feature only works with
< ** IPv4, and only on 82599 and later.
< ** Also this will cause IP forwarding to
< ** fail and that can't be controlled by
< ** the stack as LRO can. For all these
< ** reasons I've deemed it best to leave
< ** this off and not bother with a tuneable
< ** interface, this would need to be compiled
< ** to enable.
< */
---
> * HW RSC control:
> * this feature only works with
> * IPv4, and only on 82599 and later.
> * Also this will cause IP forwarding to
> * fail and that can't be controlled by
> * the stack as LRO can. For all these
> * reasons I've deemed it best to leave
> * this off and not bother with a tuneable
> * interface, this would need to be compiled
> * to enable.
> */
71d57
< #ifdef IXGBE_FDIR
73,80c59,66
< ** For Flow Director: this is the
< ** number of TX packets we sample
< ** for the filter pool, this means
< ** every 20th packet will be probed.
< **
< ** This feature can be disabled by
< ** setting this to 0.
< */
---
> * For Flow Director: this is the
> * number of TX packets we sample
> * for the filter pool, this means
> * every 20th packet will be probed.
> *
> * This feature can be disabled by
> * setting this to 0.
> */
82d67
< #endif
84c69
< /*********************************************************************
---
> /************************************************************************
86,101c71,82
< *********************************************************************/
< static void ixgbe_setup_transmit_ring(struct tx_ring *);
< static void ixgbe_free_transmit_buffers(struct tx_ring *);
< static int ixgbe_setup_receive_ring(struct rx_ring *);
< static void ixgbe_free_receive_buffers(struct rx_ring *);
<
< static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
< static void ixgbe_refresh_mbufs(struct rx_ring *, int);
< static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
< static int ixgbe_tx_ctx_setup(struct tx_ring *,
< struct mbuf *, u32 *, u32 *);
< static int ixgbe_tso_setup(struct tx_ring *,
< struct mbuf *, u32 *, u32 *);
< #ifdef IXGBE_FDIR
< static void ixgbe_atr(struct tx_ring *, struct mbuf *);
< #endif
---
> ************************************************************************/
> static void ixgbe_setup_transmit_ring(struct tx_ring *);
> static void ixgbe_free_transmit_buffers(struct tx_ring *);
> static int ixgbe_setup_receive_ring(struct rx_ring *);
> static void ixgbe_free_receive_buffers(struct rx_ring *);
> static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
> static void ixgbe_refresh_mbufs(struct rx_ring *, int);
> static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
> static int ixgbe_tx_ctx_setup(struct tx_ring *,
> struct mbuf *, u32 *, u32 *);
> static int ixgbe_tso_setup(struct tx_ring *,
> struct mbuf *, u32 *, u32 *);
104c85,88
< struct mbuf *, u32);
---
> struct mbuf *, u32);
> static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
> struct ixgbe_dma_alloc *, int);
> static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
106,108c90,91
< #ifdef IXGBE_LEGACY_TX
< /*********************************************************************
< * Transmit entry point
---
> /************************************************************************
> * ixgbe_legacy_start_locked - Transmit entry point
110,118c93,100
< * ixgbe_start is called by the stack to initiate a transmit.
< * The driver will remain in this routine as long as there are
< * packets to transmit and transmit resources are available.
< * In case resources are not available stack is notified and
< * the packet is requeued.
< **********************************************************************/
<
< void
< ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
---
> * Called by the stack to initiate a transmit.
> * The driver will remain in this routine as long as there are
> * packets to transmit and transmit resources are available.
> * In case resources are not available, the stack is notified
> * and the packet is requeued.
> ************************************************************************/
> int
> ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
126c108
< return;
---
> return (ENETDOWN);
128c110
< return;
---
> return (ENETDOWN);
146,147d127
< return;
< }
149,153c129,137
< /*
< * Legacy TX start - called by the stack, this
< * always uses the first tx ring, and should
< * not be used with multiqueue tx enabled.
< */
---
> return IXGBE_SUCCESS;
> } /* ixgbe_legacy_start_locked */
>
> /************************************************************************
> * ixgbe_legacy_start
> *
> * Called by the stack, this always uses the first tx ring,
> * and should not be used with multiqueue tx enabled.
> ************************************************************************/
155c139
< ixgbe_start(struct ifnet *ifp)
---
> ixgbe_legacy_start(struct ifnet *ifp)
158c142
< struct tx_ring *txr = adapter->tx_rings;
---
> struct tx_ring *txr = adapter->tx_rings;
162c146
< ixgbe_start_locked(txr, ifp);
---
> ixgbe_legacy_start_locked(ifp, txr);
165,166c149
< return;
< }
---
> } /* ixgbe_legacy_start */
168,173c151,155
< #else /* ! IXGBE_LEGACY_TX */
<
< /*
< ** Multiqueue Transmit Entry Point
< ** (if_transmit function)
< */
---
> /************************************************************************
> * ixgbe_mq_start - Multiqueue Transmit Entry Point
> *
> * (if_transmit function)
> ************************************************************************/
177,183c159,163
< struct adapter *adapter = ifp->if_softc;
< struct ix_queue *que;
< struct tx_ring *txr;
< int i, err = 0;
< #ifdef RSS
< uint32_t bucket_id;
< #endif
---
> struct adapter *adapter = ifp->if_softc;
> struct ix_queue *que;
> struct tx_ring *txr;
> int i, err = 0;
> uint32_t bucket_id;
193,195c173,175
< #ifdef RSS
< if (rss_hash2bucket(m->m_pkthdr.flowid,
< M_HASHTYPE_GET(m), &bucket_id) == 0) {
---
> if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
> (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
> &bucket_id) == 0)) {
199,200c179,181
< if_printf(ifp, "bucket_id (%d) > num_queues "
< "(%d)\n", bucket_id, adapter->num_queues);
---
> if_printf(ifp,
> "bucket_id (%d) > num_queues (%d)\n",
> bucket_id, adapter->num_queues);
202,203c183
< } else
< #endif
---
> } else
225c205
< }
---
> } /* ixgbe_mq_start */
226a207,209
> /************************************************************************
> * ixgbe_mq_start_locked
> ************************************************************************/
230,232c213,214
< struct adapter *adapter = txr->adapter;
< struct mbuf *next;
< int enqueued = 0, err = 0;
---
> struct mbuf *next;
> int enqueued = 0, err = 0;
234,235c216
< if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
< adapter->link_active == 0)
---
> if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
236a218,219
> if (txr->adapter->link_active == 0)
> return (ENETDOWN);
247,248c230,232
< if ((err = ixgbe_xmit(txr, &next)) != 0) {
< if (next == NULL) {
---
> err = ixgbe_xmit(txr, &next);
> if (err != 0) {
> if (next == NULL)
250c234
< } else {
---
> else
252d235
< }
260d242
< #if 0 // this is VF-only
267c249,250
< if (txr->tail < IXGBE_TDT(0) && next->m_flags & M_MCAST)
---
> if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
> (next->m_flags & M_MCAST))
270d252
< #endif
280c262
< if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
---
> if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
284c266
< }
---
> } /* ixgbe_mq_start_locked */
286,288c268,272
< /*
< * Called from a taskqueue to drain queued transmit packets.
< */
---
> /************************************************************************
> * ixgbe_deferred_mq_start
> *
> * Called from a taskqueue to drain queued transmit packets.
> ************************************************************************/
294c278
< struct ifnet *ifp = adapter->ifp;
---
> struct ifnet *ifp = adapter->ifp;
300c284
< }
---
> } /* ixgbe_deferred_mq_start */
302,304c286,288
< /*
< * Flush all ring buffers
< */
---
> /************************************************************************
> * ixgbe_qflush - Flush all ring buffers
> ************************************************************************/
308,310c292,294
< struct adapter *adapter = ifp->if_softc;
< struct tx_ring *txr = adapter->tx_rings;
< struct mbuf *m;
---
> struct adapter *adapter = ifp->if_softc;
> struct tx_ring *txr = adapter->tx_rings;
> struct mbuf *m;
319,320c303
< }
< #endif /* IXGBE_LEGACY_TX */
---
> } /* ixgbe_qflush */
323c306,307
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_xmit
325,327c309,310
< * This routine maps the mbufs to tx descriptors, allowing the
< * TX engine to transmit the packets.
< * - return 0 on success, positive on failure
---
> * Maps the mbufs to tx descriptors, allowing the
> * TX engine to transmit the packets.
329,330c312,313
< **********************************************************************/
<
---
> * Return 0 on success, positive on failure
> ************************************************************************/
334,342c317,318
< struct adapter *adapter = txr->adapter;
< u32 olinfo_status = 0, cmd_type_len;
< int i, j, error, nsegs;
< int first;
< bool remap = TRUE;
< struct mbuf *m_head;
< bus_dma_segment_t segs[adapter->num_segs];
< bus_dmamap_t map;
< struct ixgbe_tx_buf *txbuf;
---
> struct adapter *adapter = txr->adapter;
> struct ixgbe_tx_buf *txbuf;
343a320,326
> struct mbuf *m_head;
> int i, j, error, nsegs;
> int first;
> u32 olinfo_status = 0, cmd_type_len;
> bool remap = TRUE;
> bus_dma_segment_t segs[adapter->num_segs];
> bus_dmamap_t map;
348c331
< cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
---
> cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
352c335
< cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
---
> cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
354,359c337,342
< /*
< * Important to capture the first descriptor
< * used because it will contain the index of
< * the one we tell the hardware to report back
< */
< first = txr->next_avail_desc;
---
> /*
> * Important to capture the first descriptor
> * used because it will contain the index of
> * the one we tell the hardware to report back
> */
> first = txr->next_avail_desc;
367,368c350,351
< error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
< *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
---
> error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs,
> &nsegs, BUS_DMA_NOWAIT);
423d405
< #ifdef IXGBE_FDIR
425c407,408
< if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
---
> if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
> (txr->atr_sample) && (!adapter->fdir_reinit)) {
432d414
< #endif
447c429
< cmd_type_len |seglen);
---
> cmd_type_len | seglen);
454,455c436
< txd->read.cmd_type_len |=
< htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
---
> txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
470,471c451,452
< /* Set the EOP descriptor that will be marked done */
< txbuf = &txr->tx_buffers[first];
---
> /* Set the EOP descriptor that will be marked done */
> txbuf = &txr->tx_buffers[first];
474,475c455,456
< bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
< BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
---
> bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
> BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
488c469
< }
---
> } /* ixgbe_xmit */
491c472,473
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_allocate_transmit_buffers
493,498c475,479
< * Allocate memory for tx_buffer structures. The tx_buffer stores all
< * the information needed to transmit a packet on the wire. This is
< * called only once at attach, setup is done every reset.
< *
< **********************************************************************/
< int
---
> * Allocate memory for tx_buffer structures. The tx_buffer stores all
> * the information needed to transmit a packet on the wire. This is
> * called only once at attach, setup is done every reset.
> ************************************************************************/
> static int
501,502c482,483
< struct adapter *adapter = txr->adapter;
< device_t dev = adapter->dev;
---
> struct adapter *adapter = txr->adapter;
> device_t dev = adapter->dev;
504c485
< int error, i;
---
> int error, i;
509,522c490,506
< if ((error = bus_dma_tag_create(
< bus_get_dma_tag(adapter->dev), /* parent */
< 1, 0, /* alignment, bounds */
< BUS_SPACE_MAXADDR, /* lowaddr */
< BUS_SPACE_MAXADDR, /* highaddr */
< NULL, NULL, /* filter, filterarg */
< IXGBE_TSO_SIZE, /* maxsize */
< adapter->num_segs, /* nsegments */
< PAGE_SIZE, /* maxsegsize */
< 0, /* flags */
< NULL, /* lockfunc */
< NULL, /* lockfuncarg */
< &txr->txtag))) {
< device_printf(dev,"Unable to allocate TX DMA tag\n");
---
> error = bus_dma_tag_create(
> /* parent */ bus_get_dma_tag(adapter->dev),
> /* alignment */ 1,
> /* bounds */ 0,
> /* lowaddr */ BUS_SPACE_MAXADDR,
> /* highaddr */ BUS_SPACE_MAXADDR,
> /* filter */ NULL,
> /* filterarg */ NULL,
> /* maxsize */ IXGBE_TSO_SIZE,
> /* nsegments */ adapter->num_segs,
> /* maxsegsize */ PAGE_SIZE,
> /* flags */ 0,
> /* lockfunc */ NULL,
> /* lockfuncarg */ NULL,
> &txr->txtag);
> if (error != 0) {
> device_printf(dev, "Unable to allocate TX DMA tag\n");
526,528c510,513
< if (!(txr->tx_buffers =
< (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
< adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
---
> txr->tx_buffers =
> (struct ixgbe_tx_buf *)malloc(sizeof(struct ixgbe_tx_buf) *
> adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
> if (txr->tx_buffers == NULL) {
534c519
< /* Create the descriptor buffer dma maps */
---
> /* Create the descriptor buffer dma maps */
547a533
>
549c535
< }
---
> } /* ixgbe_allocate_transmit_buffers */
551,555c537,539
< /*********************************************************************
< *
< * Initialize a transmit ring.
< *
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_setup_transmit_ring - Initialize a transmit ring.
> ************************************************************************/
559,560c543,544
< struct adapter *adapter = txr->adapter;
< struct ixgbe_tx_buf *txbuf;
---
> struct adapter *adapter = txr->adapter;
> struct ixgbe_tx_buf *txbuf;
563c547
< struct netmap_slot *slot;
---
> struct netmap_slot *slot;
567a552
>
569,573c554,560
< /*
< * (under lock): if in netmap mode, do some consistency
< * checks and set slot to entry 0 of the netmap ring.
< */
< slot = netmap_reset(na, NR_TX, txr->me, 0);
---
> if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
> /*
> * (under lock): if in netmap mode, do some consistency
> * checks and set slot to entry 0 of the netmap ring.
> */
> slot = netmap_reset(na, NR_TX, txr->me, 0);
> }
574a562
>
576c564
< (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
---
> (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
582c570
< txbuf = txr->tx_buffers;
---
> txbuf = txr->tx_buffers;
590a579
>
602c591
< if (slot) {
---
> if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
607a597
>
610c600
< }
---
> }
612d601
< #ifdef IXGBE_FDIR
614c603
< if (adapter->hw.mac.type != ixgbe_mac_82598EB)
---
> if (adapter->feat_en & IXGBE_FEATURE_FDIR)
616d604
< #endif
624c612
< }
---
> } /* ixgbe_setup_transmit_ring */
626,630c614,616
< /*********************************************************************
< *
< * Initialize all transmit rings.
< *
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_setup_transmit_structures - Initialize all transmit rings.
> ************************************************************************/
640c626
< }
---
> } /* ixgbe_setup_transmit_structures */
642,646c628,630
< /*********************************************************************
< *
< * Free all transmit rings.
< *
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_free_transmit_structures - Free all transmit rings.
> ************************************************************************/
660c644
< }
---
> } /* ixgbe_free_transmit_structures */
662c646,647
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_free_transmit_buffers
664,666c649,650
< * Free transmit ring related data structures.
< *
< **********************************************************************/
---
> * Free transmit ring related data structures.
> ************************************************************************/
670c654
< struct adapter *adapter = txr->adapter;
---
> struct adapter *adapter = txr->adapter;
672c656
< int i;
---
> int i;
684,685c668
< bus_dmamap_unload(txr->txtag,
< tx_buffer->map);
---
> bus_dmamap_unload(txr->txtag, tx_buffer->map);
689,690c672
< bus_dmamap_destroy(txr->txtag,
< tx_buffer->map);
---
> bus_dmamap_destroy(txr->txtag, tx_buffer->map);
694,697c676,677
< bus_dmamap_unload(txr->txtag,
< tx_buffer->map);
< bus_dmamap_destroy(txr->txtag,
< tx_buffer->map);
---
> bus_dmamap_unload(txr->txtag, tx_buffer->map);
> bus_dmamap_destroy(txr->txtag, tx_buffer->map);
701d680
< #ifdef IXGBE_LEGACY_TX
704d682
< #endif
713,714c691
< return;
< }
---
> } /* ixgbe_free_transmit_buffers */
716c693,694
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_tx_ctx_setup
718,721c696,697
< * Advanced Context Descriptor setup for VLAN, CSUM or TSO
< *
< **********************************************************************/
<
---
> * Advanced Context Descriptor setup for VLAN, CSUM or TSO
> ************************************************************************/
726d701
< struct adapter *adapter = txr->adapter;
728c703
< struct ether_vlan_header *eh;
---
> struct ether_vlan_header *eh;
730c705
< struct ip *ip;
---
> struct ip *ip;
733c708
< struct ip6_hdr *ip6;
---
> struct ip6_hdr *ip6;
735,742c710,718
< u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
< int ehdrlen, ip_hlen = 0;
< u16 etype;
< u8 ipproto = 0;
< int offload = TRUE;
< int ctxd = txr->next_avail_desc;
< u16 vtag = 0;
< caddr_t l3d;
---
> int ehdrlen, ip_hlen = 0;
> int offload = TRUE;
> int ctxd = txr->next_avail_desc;
> u32 vlan_macip_lens = 0;
> u32 type_tucmd_mlhl = 0;
> u16 vtag = 0;
> u16 etype;
> u8 ipproto = 0;
> caddr_t l3d;
746c722
< if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO|CSUM_IP6_TSO))
---
> if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO))
753c729
< *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
---
> *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
756c732
< TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
---
> TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
759,762c735,738
< ** In advanced descriptors the vlan tag must
< ** be placed into the context descriptor. Hence
< ** we need to make one even if not doing offloads.
< */
---
> * In advanced descriptors the vlan tag must
> * be placed into the context descriptor. Hence
> * we need to make one even if not doing offloads.
> */
766c742,743
< } else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
---
> } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
> (offload == FALSE))
790,791c767,770
< * If the first mbuf only includes the ethernet header, jump to the next one
< * XXX: This assumes the stack splits mbufs containing headers on header boundaries
---
> * If the first mbuf only includes the ethernet header,
> * jump to the next one
> * XXX: This assumes the stack splits mbufs containing headers
> * on header boundaries
831c810,811
< if (mp->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
---
> if (mp->m_pkthdr.csum_flags &
> (CSUM_IP_TCP | CSUM_IP6_TCP))
837c817,818
< if (mp->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
---
> if (mp->m_pkthdr.csum_flags &
> (CSUM_IP_UDP | CSUM_IP6_UDP))
843c824,825
< if (mp->m_pkthdr.csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
---
> if (mp->m_pkthdr.csum_flags &
> (CSUM_IP_SCTP | CSUM_IP6_SCTP))
871,872c853,854
< return (0);
< }
---
> return (0);
> } /* ixgbe_tx_ctx_setup */
874c856,857
< /**********************************************************************
---
> /************************************************************************
> * ixgbe_tso_setup
876,879c859,861
< * Setup work for hardware segmentation offload (TSO) on
< * adapters using advanced tx descriptors
< *
< **********************************************************************/
---
> * Setup work for hardware segmentation offload (TSO) on
> * adapters using advanced tx descriptors
> ************************************************************************/
881,882c863,864
< ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
< u32 *cmd_type_len, u32 *olinfo_status)
---
> ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
> u32 *olinfo_status)
885,889c867
< u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
< u32 mss_l4len_idx = 0, paylen;
< u16 vtag = 0, eh_type;
< int ctxd, ehdrlen, ip_hlen, tcp_hlen;
< struct ether_vlan_header *eh;
---
> struct ether_vlan_header *eh;
891c869
< struct ip6_hdr *ip6;
---
> struct ip6_hdr *ip6;
894c872
< struct ip *ip;
---
> struct ip *ip;
896c874,879
< struct tcphdr *th;
---
> struct tcphdr *th;
> int ctxd, ehdrlen, ip_hlen, tcp_hlen;
> u32 vlan_macip_lens = 0;
> u32 type_tucmd_mlhl = 0;
> u32 mss_l4len_idx = 0, paylen;
> u16 vtag = 0, eh_type;
912,924d894
< #ifdef INET6
< case ETHERTYPE_IPV6:
< ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
< /* XXX-BZ For now we do not pretend to support ext. hdrs. */
< if (ip6->ip6_nxt != IPPROTO_TCP)
< return (ENXIO);
< ip_hlen = sizeof(struct ip6_hdr);
< ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
< th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
< th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
< type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
< break;
< #endif
939a910,921
> #ifdef INET6
> case ETHERTYPE_IPV6:
> ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
> /* XXX-BZ For now we do not pretend to support ext. hdrs. */
> if (ip6->ip6_nxt != IPPROTO_TCP)
> return (ENXIO);
> ip_hlen = sizeof(struct ip6_hdr);
> th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
> th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
> type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
> break;
> #endif
947c929
< TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
---
> TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
957c939
< vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
---
> vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
984a967
>
986c969
< }
---
> } /* ixgbe_tso_setup */
989c972,973
< /**********************************************************************
---
> /************************************************************************
> * ixgbe_txeof
991,995c975,978
< * Examine each tx_buffer in the used queue. If the hardware is done
< * processing the packet then free associated resources. The
< * tx_buffer is put back on the free queue.
< *
< **********************************************************************/
---
> * Examine each tx_buffer in the used queue. If the hardware is done
> * processing the packet then free associated resources. The
> * tx_buffer is put back on the free queue.
> ************************************************************************/
999,1005c982,983
< struct adapter *adapter = txr->adapter;
< #ifdef DEV_NETMAP
< struct ifnet *ifp = adapter->ifp;
< #endif
< u32 work, processed = 0;
< u32 limit = adapter->tx_process_limit;
< struct ixgbe_tx_buf *buf;
---
> struct adapter *adapter = txr->adapter;
> struct ixgbe_tx_buf *buf;
1006a985,986
> u32 work, processed = 0;
> u32 limit = adapter->tx_process_limit;
1011,1012c991,993
< if (ifp->if_capenable & IFCAP_NETMAP) {
< struct netmap_adapter *na = NA(ifp);
---
> if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
> (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
> struct netmap_adapter *na = NA(adapter->ifp);
1033,1034c1014,1015
< txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
< netmap_tx_irq(ifp, txr->me);
---
> txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
> netmap_tx_irq(adapter->ifp, txr->me);
1050,1051c1031,1032
< bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
< BUS_DMASYNC_POSTREAD);
---
> bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
> BUS_DMASYNC_POSTREAD);
1062,1065c1043,1044
< txr->bytes +=
< buf->m_head->m_pkthdr.len;
< bus_dmamap_sync(txr->txtag,
< buf->map,
---
> txr->bytes += buf->m_head->m_pkthdr.len;
> bus_dmamap_sync(txr->txtag, buf->map,
1067,1068c1046
< bus_dmamap_unload(txr->txtag,
< buf->map);
---
> bus_dmamap_unload(txr->txtag, buf->map);
1087,1090c1065,1066
< txr->bytes +=
< buf->m_head->m_pkthdr.len;
< bus_dmamap_sync(txr->txtag,
< buf->map,
---
> txr->bytes += buf->m_head->m_pkthdr.len;
> bus_dmamap_sync(txr->txtag, buf->map,
1092,1093c1068
< bus_dmamap_unload(txr->txtag,
< buf->map);
---
> bus_dmamap_unload(txr->txtag, buf->map);
1124,1130c1099,1105
< ** Queue Hang detection, we know there's
< ** work outstanding or the first return
< ** would have been taken, so increment busy
< ** if nothing managed to get cleaned, then
< ** in local_timer it will be checked and
< ** marked as HUNG if it exceeds a MAX attempt.
< */
---
> * Queue Hang detection, we know there's
> * work outstanding or the first return
> * would have been taken, so increment busy
> * if nothing managed to get cleaned, then
> * in local_timer it will be checked and
> * marked as HUNG if it exceeds a MAX attempt.
> */
1134,1136c1109,1111
< ** If anything gets cleaned we reset state to 1,
< ** note this will turn off HUNG if its set.
< */
---
> * If anything gets cleaned we reset state to 1,
> * note this will turn off HUNG if its set.
> */
1144c1119
< }
---
> } /* ixgbe_txeof */
1146,1226c1121,1125
<
< #ifdef IXGBE_FDIR
< /*
< ** This routine parses packet headers so that Flow
< ** Director can make a hashed filter table entry
< ** allowing traffic flows to be identified and kept
< ** on the same cpu. This would be a performance
< ** hit, but we only do it at IXGBE_FDIR_RATE of
< ** packets.
< */
< static void
< ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
< {
< struct adapter *adapter = txr->adapter;
< struct ix_queue *que;
< struct ip *ip;
< struct tcphdr *th;
< struct udphdr *uh;
< struct ether_vlan_header *eh;
< union ixgbe_atr_hash_dword input = {.dword = 0};
< union ixgbe_atr_hash_dword common = {.dword = 0};
< int ehdrlen, ip_hlen;
< u16 etype;
<
< eh = mtod(mp, struct ether_vlan_header *);
< if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
< ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
< etype = eh->evl_proto;
< } else {
< ehdrlen = ETHER_HDR_LEN;
< etype = eh->evl_encap_proto;
< }
<
< /* Only handling IPv4 */
< if (etype != htons(ETHERTYPE_IP))
< return;
<
< ip = (struct ip *)(mp->m_data + ehdrlen);
< ip_hlen = ip->ip_hl << 2;
<
< /* check if we're UDP or TCP */
< switch (ip->ip_p) {
< case IPPROTO_TCP:
< th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
< /* src and dst are inverted */
< common.port.dst ^= th->th_sport;
< common.port.src ^= th->th_dport;
< input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
< break;
< case IPPROTO_UDP:
< uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
< /* src and dst are inverted */
< common.port.dst ^= uh->uh_sport;
< common.port.src ^= uh->uh_dport;
< input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
< break;
< default:
< return;
< }
<
< input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
< if (mp->m_pkthdr.ether_vtag)
< common.flex_bytes ^= htons(ETHERTYPE_VLAN);
< else
< common.flex_bytes ^= etype;
< common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
<
< que = &adapter->queues[txr->me];
< /*
< ** This assumes the Rx queue and Tx
< ** queue are bound to the same CPU
< */
< ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
< input, common, que->msix);
< }
< #endif /* IXGBE_FDIR */
<
< /*
< ** Used to detect a descriptor that has
< ** been merged by Hardware RSC.
< */
---
> /************************************************************************
> * ixgbe_rsc_count
> *
> * Used to detect a descriptor that has been merged by Hardware RSC.
> ************************************************************************/
1232c1131
< }
---
> } /* ixgbe_rsc_count */
1234c1133,1134
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_setup_hw_rsc
1236,1238c1136,1138
< * Initialize Hardware RSC (LRO) feature on 82599
< * for an RX ring, this is toggled by the LRO capability
< * even though it is transparent to the stack.
---
> * Initialize Hardware RSC (LRO) feature on 82599
> * for an RX ring, this is toggled by the LRO capability
> * even though it is transparent to the stack.
1240,1244c1140,1143
< * NOTE: since this HW feature only works with IPV4 and
< * our testing has shown soft LRO to be as effective
< * I have decided to disable this by default.
< *
< **********************************************************************/
---
> * NOTE: Since this HW feature only works with IPv4 and
> * testing has shown soft LRO to be as effective,
> * this feature will be disabled by default.
> ************************************************************************/
1248,1250c1147,1149
< struct adapter *adapter = rxr->adapter;
< struct ixgbe_hw *hw = &adapter->hw;
< u32 rscctrl, rdrxctl;
---
> struct adapter *adapter = rxr->adapter;
> struct ixgbe_hw *hw = &adapter->hw;
> u32 rscctrl, rdrxctl;
1261,1262c1160,1164
< #ifdef DEV_NETMAP /* crcstrip is optional in netmap */
< if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
---
> #ifdef DEV_NETMAP
> /* Always strip CRC unless Netmap disabled it */
> if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
> !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
> ix_crcstrip)
1264c1166
< rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
---
> rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1271,1273c1173,1175
< ** Limit the total number of descriptors that
< ** can be combined, so it does not exceed 64K
< */
---
> * Limit the total number of descriptors that
> * can be combined, so it does not exceed 64K
> */
1287,1288c1189
< (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
< IXGBE_PSRTYPE_TCPHDR));
---
> (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1295c1196
< }
---
> } /* ixgbe_setup_hw_rsc */
1297c1198,1199
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_refresh_mbufs
1299,1305c1201,1206
< * Refresh mbuf buffers for RX descriptor rings
< * - now keeps its own state so discards due to resource
< * exhaustion are unnecessary, if an mbuf cannot be obtained
< * it just returns, keeping its placeholder, thus it can simply
< * be recalled to try again.
< *
< **********************************************************************/
---
> * Refresh mbuf buffers for RX descriptor rings
> * - now keeps its own state so discards due to resource
> * exhaustion are unnecessary, if an mbuf cannot be obtained
> * it just returns, keeping its placeholder, thus it can simply
> * be recalled to try again.
> ************************************************************************/
1309,1314c1210,1215
< struct adapter *adapter = rxr->adapter;
< bus_dma_segment_t seg[1];
< struct ixgbe_rx_buf *rxbuf;
< struct mbuf *mp;
< int i, j, nsegs, error;
< bool refreshed = FALSE;
---
> struct adapter *adapter = rxr->adapter;
> struct ixgbe_rx_buf *rxbuf;
> struct mbuf *mp;
> bus_dma_segment_t seg[1];
> int i, j, nsegs, error;
> bool refreshed = FALSE;
1324,1325c1225,1226
< mp = m_getjcl(M_NOWAIT, MT_DATA,
< M_PKTHDR, rxr->mbuf_sz);
---
> mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
> rxr->mbuf_sz);
1341,1342c1242,1243
< error = bus_dmamap_load_mbuf_sg(rxr->ptag,
< rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT);
---
> error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap,
> mp, seg, &nsegs, BUS_DMA_NOWAIT);
1344,1345c1245
< printf("Refresh mbufs: payload dmamap load"
< " failure - %d\n", error);
---
> printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
1366a1267
>
1369,1370c1270,1271
< IXGBE_WRITE_REG(&adapter->hw,
< rxr->tail, rxr->next_to_refresh);
---
> IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
>
1372c1273
< }
---
> } /* ixgbe_refresh_mbufs */
1374c1275,1276
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_allocate_receive_buffers
1376,1382c1278,1283
< * Allocate memory for rx_buffer structures. Since we use one
< * rx_buffer per received packet, the maximum number of rx_buffer's
< * that we'll need is equal to the number of receive descriptors
< * that we've allocated.
< *
< **********************************************************************/
< int
---
> * Allocate memory for rx_buffer structures. Since we use one
> * rx_buffer per received packet, the maximum number of rx_buffer's
> * that we'll need is equal to the number of receive descriptors
> * that we've allocated.
> ************************************************************************/
> static int
1385,1388c1286,1289
< struct adapter *adapter = rxr->adapter;
< device_t dev = adapter->dev;
< struct ixgbe_rx_buf *rxbuf;
< int bsize, error;
---
> struct adapter *adapter = rxr->adapter;
> device_t dev = adapter->dev;
> struct ixgbe_rx_buf *rxbuf;
> int bsize, error;
1391,1393c1292,1294
< if (!(rxr->rx_buffers =
< (struct ixgbe_rx_buf *) malloc(bsize,
< M_DEVBUF, M_NOWAIT | M_ZERO))) {
---
> rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
> M_NOWAIT | M_ZERO);
> if (rxr->rx_buffers == NULL) {
1399,1410c1300,1315
< if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
< 1, 0, /* alignment, bounds */
< BUS_SPACE_MAXADDR, /* lowaddr */
< BUS_SPACE_MAXADDR, /* highaddr */
< NULL, NULL, /* filter, filterarg */
< MJUM16BYTES, /* maxsize */
< 1, /* nsegments */
< MJUM16BYTES, /* maxsegsize */
< 0, /* flags */
< NULL, /* lockfunc */
< NULL, /* lockfuncarg */
< &rxr->ptag))) {
---
> error = bus_dma_tag_create(
> /* parent */ bus_get_dma_tag(dev),
> /* alignment */ 1,
> /* bounds */ 0,
> /* lowaddr */ BUS_SPACE_MAXADDR,
> /* highaddr */ BUS_SPACE_MAXADDR,
> /* filter */ NULL,
> /* filterarg */ NULL,
> /* maxsize */ MJUM16BYTES,
> /* nsegments */ 1,
> /* maxsegsize */ MJUM16BYTES,
> /* flags */ 0,
> /* lockfunc */ NULL,
> /* lockfuncarg */ NULL,
> &rxr->ptag);
> if (error != 0) {
1428a1334
>
1430c1336
< }
---
> } /* ixgbe_allocate_receive_buffers */
1432c1338,1341
< static void
---
> /************************************************************************
> * ixgbe_free_receive_ring
> ************************************************************************/
> static void
1434,1436c1343
< {
< struct ixgbe_rx_buf *rxbuf;
<
---
> {
1438,1447c1345
< rxbuf = &rxr->rx_buffers[i];
< if (rxbuf->buf != NULL) {
< bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
< BUS_DMASYNC_POSTREAD);
< bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
< rxbuf->buf->m_flags |= M_PKTHDR;
< m_freem(rxbuf->buf);
< rxbuf->buf = NULL;
< rxbuf->flags = 0;
< }
---
> ixgbe_rx_discard(rxr, i);
1449c1347
< }
---
> } /* ixgbe_free_receive_ring */
1451c1349,1350
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_setup_receive_ring
1453,1455c1352,1353
< * Initialize a receive ring and its buffers.
< *
< **********************************************************************/
---
> * Initialize a receive ring and its buffers.
> ************************************************************************/
1459,1465c1357,1361
< struct adapter *adapter;
< struct ifnet *ifp;
< device_t dev;
< struct ixgbe_rx_buf *rxbuf;
< bus_dma_segment_t seg[1];
< struct lro_ctrl *lro = &rxr->lro;
< int rsize, nsegs, error = 0;
---
> struct adapter *adapter;
> struct ifnet *ifp;
> device_t dev;
> struct ixgbe_rx_buf *rxbuf;
> struct lro_ctrl *lro = &rxr->lro;
1468c1364
< struct netmap_slot *slot;
---
> struct netmap_slot *slot;
1469a1366,1367
> bus_dma_segment_t seg[1];
> int rsize, nsegs, error = 0;
1476a1375
>
1478,1479c1377,1378
< /* same as in ixgbe_setup_transmit_ring() */
< slot = netmap_reset(na, NR_RX, rxr->me, 0);
---
> if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
> slot = netmap_reset(na, NR_RX, rxr->me, 0);
1480a1380
>
1492c1392
< struct mbuf *mp;
---
> struct mbuf *mp;
1494a1395
>
1503c1404
< if (slot) {
---
> if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1516,1518c1417,1420
< rxbuf->flags = 0;
< rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA,
< M_PKTHDR, adapter->rx_mbuf_sz);
---
>
> rxbuf->flags = 0;
> rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
> adapter->rx_mbuf_sz);
1521c1423
< goto fail;
---
> goto fail;
1526,1527c1428
< error = bus_dmamap_load_mbuf_sg(rxr->ptag,
< rxbuf->pmap, mp, seg,
---
> error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, seg,
1530,1532c1431,1432
< goto fail;
< bus_dmamap_sync(rxr->ptag,
< rxbuf->pmap, BUS_DMASYNC_PREREAD);
---
> goto fail;
> bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD);
1551,1552c1451,1452
< ** Now set up the LRO interface:
< */
---
> * Now set up the LRO interface
> */
1566a1467
>
1571a1473
>
1573c1475
< }
---
> } /* ixgbe_setup_receive_ring */
1575,1579c1477,1479
< /*********************************************************************
< *
< * Initialize all receive rings.
< *
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_setup_receive_structures - Initialize all receive rings.
> ************************************************************************/
1584c1484
< int j;
---
> int j;
1598a1499
> IXGBE_RX_LOCK(rxr);
1599a1501
> IXGBE_RX_UNLOCK(rxr);
1603c1505
< }
---
> } /* ixgbe_setup_receive_structures */
1606,1610c1508,1510
< /*********************************************************************
< *
< * Free all receive rings.
< *
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_free_receive_structures - Free all receive rings.
> ************************************************************************/
1619d1518
< struct lro_ctrl *lro = &rxr->lro;
1622c1521
< tcp_lro_free(lro);
---
> tcp_lro_free(&rxr->lro);
1628c1527
< }
---
> } /* ixgbe_free_receive_structures */
1631,1636c1530,1533
< /*********************************************************************
< *
< * Free receive ring data structures
< *
< **********************************************************************/
< void
---
> /************************************************************************
> * ixgbe_free_receive_buffers - Free receive ring data structures
> ************************************************************************/
> static void
1639,1640c1536,1537
< struct adapter *adapter = rxr->adapter;
< struct ixgbe_rx_buf *rxbuf;
---
> struct adapter *adapter = rxr->adapter;
> struct ixgbe_rx_buf *rxbuf;
1648,1655c1545
< if (rxbuf->buf != NULL) {
< bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
< BUS_DMASYNC_POSTREAD);
< bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
< rxbuf->buf->m_flags |= M_PKTHDR;
< m_freem(rxbuf->buf);
< }
< rxbuf->buf = NULL;
---
> ixgbe_rx_discard(rxr, i);
1673c1563
< }
---
> } /* ixgbe_free_receive_buffers */
1674a1565,1567
> /************************************************************************
> * ixgbe_rx_input
> ************************************************************************/
1676c1569,1570
< ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
---
> ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
> u32 ptype)
1678,1702c1572,1595
<
< /*
< * ATM LRO is only for IP/TCP packets and TCP checksum of the packet
< * should be computed by hardware. Also it should not have VLAN tag in
< * ethernet header. In case of IPv6 we do not yet support ext. hdrs.
< */
< if (rxr->lro_enabled &&
< (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
< (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
< ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
< (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
< (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
< (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
< (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
< (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
< /*
< * Send to the stack if:
< ** - LRO not enabled, or
< ** - no LRO resources, or
< ** - lro enqueue fails
< */
< if (rxr->lro.lro_cnt != 0)
< if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
< return;
< }
---
> /*
> * ATM LRO is only for IP/TCP packets and TCP checksum of the packet
> * should be computed by hardware. Also it should not have VLAN tag in
> * ethernet header. In case of IPv6 we do not yet support ext. hdrs.
> */
> if (rxr->lro_enabled &&
> (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
> (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
> ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
> (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
> (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
> (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
> (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
> (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
> /*
> * Send to the stack if:
> * - LRO not enabled, or
> * - no LRO resources, or
> * - lro enqueue fails
> */
> if (rxr->lro.lro_cnt != 0)
> if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
> return;
> }
1704c1597
< (*ifp->if_input)(ifp, m);
---
> (*ifp->if_input)(ifp, m);
1706c1599
< }
---
> } /* ixgbe_rx_input */
1707a1601,1603
> /************************************************************************
> * ixgbe_rx_discard
> ************************************************************************/
1711c1607
< struct ixgbe_rx_buf *rbuf;
---
> struct ixgbe_rx_buf *rbuf;
1715d1610
<
1717,1722c1612,1617
< ** With advanced descriptors the writeback
< ** clobbers the buffer addrs, so its easier
< ** to just free the existing mbufs and take
< ** the normal refresh path to get new buffers
< ** and mapping.
< */
---
> * With advanced descriptors the writeback
> * clobbers the buffer addrs, so its easier
> * to just free the existing mbufs and take
> * the normal refresh path to get new buffers
> * and mapping.
> */
1725c1620
< rbuf->fmp->m_flags |= M_PKTHDR;
---
> bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1729a1625
> bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1736c1632
<
---
>
1738c1634
< }
---
> } /* ixgbe_rx_discard */
1741c1637,1638
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_rxeof
1743,1745c1640,1642
< * This routine executes in interrupt context. It replenishes
< * the mbufs in the descriptor and sends data which has been
< * dma'ed into host memory to upper layer.
---
> * Executes in interrupt context. It replenishes the
> * mbufs in the descriptor and sends data which has
> * been dma'ed into host memory to upper layer.
1747,1748c1644,1645
< * Return TRUE for more work, FALSE for all clean.
< *********************************************************************/
---
> * Return TRUE for more work, FALSE for all clean.
> ************************************************************************/
1752,1761c1649,1658
< struct adapter *adapter = que->adapter;
< struct rx_ring *rxr = que->rxr;
< struct ifnet *ifp = adapter->ifp;
< struct lro_ctrl *lro = &rxr->lro;
< int i, nextp, processed = 0;
< u32 staterr = 0;
< u32 count = adapter->rx_process_limit;
< union ixgbe_adv_rx_desc *cur;
< struct ixgbe_rx_buf *rbuf, *nbuf;
< u16 pkt_info;
---
> struct adapter *adapter = que->adapter;
> struct rx_ring *rxr = que->rxr;
> struct ifnet *ifp = adapter->ifp;
> struct lro_ctrl *lro = &rxr->lro;
> union ixgbe_adv_rx_desc *cur;
> struct ixgbe_rx_buf *rbuf, *nbuf;
> int i, nextp, processed = 0;
> u32 staterr = 0;
> u32 count = adapter->rx_process_limit;
> u16 pkt_info;
1766,1769c1663,1668
< /* Same as the txeof routine: wakeup clients on intr. */
< if (netmap_rx_irq(ifp, rxr->me, &processed)) {
< IXGBE_RX_UNLOCK(rxr);
< return (FALSE);
---
> if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
> /* Same as the txeof routine: wakeup clients on intr. */
> if (netmap_rx_irq(ifp, rxr->me, &processed)) {
> IXGBE_RX_UNLOCK(rxr);
> return (FALSE);
> }
1774,1779c1673,1678
< struct mbuf *sendmp, *mp;
< u32 rsc, ptype;
< u16 len;
< u16 vtag = 0;
< bool eop;
<
---
> struct mbuf *sendmp, *mp;
> u32 rsc, ptype;
> u16 len;
> u16 vtag = 0;
> bool eop;
>
1809c1708
< if (IXGBE_IS_VF(adapter))
---
> if (adapter->feat_en & IXGBE_FEATURE_VF)
1816a1716,1717
> bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
>
1818,1828c1719,1729
< ** On 82599 which supports a hardware
< ** LRO (called HW RSC), packets need
< ** not be fragmented across sequential
< ** descriptors, rather the next descriptor
< ** is indicated in bits of the descriptor.
< ** This also means that we might proceses
< ** more than one packet at a time, something
< ** that has never been true before, it
< ** required eliminating global chain pointers
< ** in favor of what we are doing here. -jfv
< */
---
> * On 82599 which supports a hardware
> * LRO (called HW RSC), packets need
> * not be fragmented across sequential
> * descriptors, rather the next descriptor
> * is indicated in bits of the descriptor.
> * This also means that we might proceses
> * more than one packet at a time, something
> * that has never been true before, it
> * required eliminating global chain pointers
> * in favor of what we are doing here. -jfv
> */
1831,1833c1732,1734
< ** Figure out the next descriptor
< ** of this frame.
< */
---
> * Figure out the next descriptor
> * of this frame.
> */
1839,1840c1740
< nextp = ((staterr &
< IXGBE_RXDADV_NEXTP_MASK) >>
---
> nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1851,1855c1751,1755
< ** Rather than using the fmp/lmp global pointers
< ** we now keep the head of a packet chain in the
< ** buffer struct and pass this along from one
< ** descriptor to the next, until we get EOP.
< */
---
> * Rather than using the fmp/lmp global pointers
> * we now keep the head of a packet chain in the
> * buffer struct and pass this along from one
> * descriptor to the next, until we get EOP.
> */
1858,1860c1758,1760
< ** See if there is a stored head
< ** that determines what we are
< */
---
> * See if there is a stored head
> * that determines what we are
> */
1876,1879c1776,1778
< sendmp->m_data +=
< IXGBE_RX_COPY_ALIGN;
< ixgbe_bcopy(mp->m_data,
< sendmp->m_data, len);
---
> sendmp->m_data += IXGBE_RX_COPY_ALIGN;
> ixgbe_bcopy(mp->m_data, sendmp->m_data,
> len);
1908,1909c1807
< if ((rxr->vtag_strip) &&
< (staterr & IXGBE_RXD_STAT_VP))
---
> if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1918,1950c1816,1848
< /*
< * In case of multiqueue, we have RXCSUM.PCSD bit set
< * and never cleared. This means we have RSS hash
< * available to be used.
< */
< if (adapter->num_queues > 1) {
< sendmp->m_pkthdr.flowid =
< le32toh(cur->wb.lower.hi_dword.rss);
< switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
< case IXGBE_RXDADV_RSSTYPE_IPV4:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_IPV4);
< break;
< case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_TCP_IPV4);
< break;
< case IXGBE_RXDADV_RSSTYPE_IPV6:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_IPV6);
< break;
< case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_TCP_IPV6);
< break;
< case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_IPV6_EX);
< break;
< case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_TCP_IPV6_EX);
< break;
---
> /*
> * In case of multiqueue, we have RXCSUM.PCSD bit set
> * and never cleared. This means we have RSS hash
> * available to be used.
> */
> if (adapter->num_queues > 1) {
> sendmp->m_pkthdr.flowid =
> le32toh(cur->wb.lower.hi_dword.rss);
> switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
> case IXGBE_RXDADV_RSSTYPE_IPV4:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_IPV4);
> break;
> case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_TCP_IPV4);
> break;
> case IXGBE_RXDADV_RSSTYPE_IPV6:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_IPV6);
> break;
> case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_TCP_IPV6);
> break;
> case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_IPV6_EX);
> break;
> case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_TCP_IPV6_EX);
> break;
1952,1963c1850,1861
< case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_UDP_IPV4);
< break;
< case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_UDP_IPV6);
< break;
< case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_RSS_UDP_IPV6_EX);
< break;
---
> case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_UDP_IPV4);
> break;
> case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_UDP_IPV6);
> break;
> case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_RSS_UDP_IPV6_EX);
> break;
1965,1970c1863,1868
< default:
< M_HASHTYPE_SET(sendmp,
< M_HASHTYPE_OPAQUE_HASH);
< }
< } else {
< sendmp->m_pkthdr.flowid = que->msix;
---
> default:
> M_HASHTYPE_SET(sendmp,
> M_HASHTYPE_OPAQUE_HASH);
> }
> } else {
> sendmp->m_pkthdr.flowid = que->msix;
1989c1887
< /* Every 8 descriptors we go to refresh mbufs */
---
> /* Every 8 descriptors we go to refresh mbufs */
2010,2011c1908,1909
< ** Still have cleaning to do?
< */
---
> * Still have cleaning to do?
> */
2014,2016d1911
< else
< return (FALSE);
< }
2017a1913,1914
> return (FALSE);
> } /* ixgbe_rxeof */
2019c1916,1918
< /*********************************************************************
---
>
> /************************************************************************
> * ixgbe_rx_checksum
2021,2025c1920,1923
< * Verify that the hardware indicated that the checksum is valid.
< * Inform the stack about the status of checksum so that stack
< * doesn't spend time verifying the checksum.
< *
< *********************************************************************/
---
> * Verify that the hardware indicated that the checksum is valid.
> * Inform the stack about the status of checksum so that stack
> * doesn't spend time verifying the checksum.
> ************************************************************************/
2029,2031c1927,1929
< u16 status = (u16) staterr;
< u8 errors = (u8) (staterr >> 24);
< bool sctp = false;
---
> u16 status = (u16)staterr;
> u8 errors = (u8)(staterr >> 24);
> bool sctp = false;
2053c1951
< }
---
> } /* ixgbe_rx_checksum */
2055,2057c1953,1955
< /********************************************************************
< * Manage DMA'able memory.
< *******************************************************************/
---
> /************************************************************************
> * ixgbe_dmamap_cb - Manage DMA'able memory.
> ************************************************************************/
2063c1961,1962
< *(bus_addr_t *) arg = segs->ds_addr;
---
> *(bus_addr_t *)arg = segs->ds_addr;
>
2065c1964
< }
---
> } /* ixgbe_dmamap_cb */
2067c1966,1969
< int
---
> /************************************************************************
> * ixgbe_dma_malloc
> ************************************************************************/
> static int
2069c1971
< struct ixgbe_dma_alloc *dma, int mapflags)
---
> struct ixgbe_dma_alloc *dma, int mapflags)
2072c1974
< int r;
---
> int r;
2074,2085c1976,1990
< r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
< DBA_ALIGN, 0, /* alignment, bounds */
< BUS_SPACE_MAXADDR, /* lowaddr */
< BUS_SPACE_MAXADDR, /* highaddr */
< NULL, NULL, /* filter, filterarg */
< size, /* maxsize */
< 1, /* nsegments */
< size, /* maxsegsize */
< BUS_DMA_ALLOCNOW, /* flags */
< NULL, /* lockfunc */
< NULL, /* lockfuncarg */
< &dma->dma_tag);
---
> r = bus_dma_tag_create(
> /* parent */ bus_get_dma_tag(adapter->dev),
> /* alignment */ DBA_ALIGN,
> /* bounds */ 0,
> /* lowaddr */ BUS_SPACE_MAXADDR,
> /* highaddr */ BUS_SPACE_MAXADDR,
> /* filter */ NULL,
> /* filterarg */ NULL,
> /* maxsize */ size,
> /* nsegments */ 1,
> /* maxsegsize */ size,
> /* flags */ BUS_DMA_ALLOCNOW,
> /* lockfunc */ NULL,
> /* lockfuncarg */ NULL,
> &dma->dma_tag);
2087,2088c1992,1994
< device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
< "error %u\n", r);
---
> device_printf(dev,
> "ixgbe_dma_malloc: bus_dma_tag_create failed; error %u\n",
> r);
2092c1998
< BUS_DMA_NOWAIT, &dma->dma_map);
---
> BUS_DMA_NOWAIT, &dma->dma_map);
2094,2095c2000,2001
< device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
< "error %u\n", r);
---
> device_printf(dev,
> "ixgbe_dma_malloc: bus_dmamem_alloc failed; error %u\n", r);
2098,2102c2004,2005
< r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
< size,
< ixgbe_dmamap_cb,
< &dma->dma_paddr,
< mapflags | BUS_DMA_NOWAIT);
---
> r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
> ixgbe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2104,2105c2007,2008
< device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
< "error %u\n", r);
---
> device_printf(dev,
> "ixgbe_dma_malloc: bus_dmamap_load failed; error %u\n", r);
2108a2012
>
2115a2020
>
2117c2022
< }
---
> } /* ixgbe_dma_malloc */
2119c2024,2027
< void
---
> /************************************************************************
> * ixgbe_dma_free
> ************************************************************************/
> static void
2127c2035
< }
---
> } /* ixgbe_dma_free */
2130c2038,2039
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_allocate_queues
2132,2135c2041,2043
< * Allocate memory for the transmit and receive rings, and then
< * the descriptors associated with each, called only once at attach.
< *
< **********************************************************************/
---
> * Allocate memory for the transmit and receive rings, and then
> * the descriptors associated with each, called only once at attach.
> ************************************************************************/
2139,2147c2047,2052
< device_t dev = adapter->dev;
< struct ix_queue *que;
< struct tx_ring *txr;
< struct rx_ring *rxr;
< int rsize, tsize, error = IXGBE_SUCCESS;
< int txconf = 0, rxconf = 0;
< #ifdef PCI_IOV
< enum ixgbe_iov_mode iov_mode;
< #endif
---
> device_t dev = adapter->dev;
> struct ix_queue *que;
> struct tx_ring *txr;
> struct rx_ring *rxr;
> int rsize, tsize, error = IXGBE_SUCCESS;
> int txconf = 0, rxconf = 0;
2149,2156c2054,2061
< /* First allocate the top level queue structs */
< if (!(adapter->queues =
< (struct ix_queue *) malloc(sizeof(struct ix_queue) *
< adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
< device_printf(dev, "Unable to allocate queue memory\n");
< error = ENOMEM;
< goto fail;
< }
---
> /* First, allocate the top level queue structs */
> adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
> adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
> if (adapter->queues == NULL) {
> device_printf(dev, "Unable to allocate queue memory\n");
> error = ENOMEM;
> goto fail;
> }
2158,2161c2063,2066
< /* First allocate the TX ring struct memory */
< if (!(adapter->tx_rings =
< (struct tx_ring *) malloc(sizeof(struct tx_ring) *
< adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
---
> /* Second, allocate the TX ring struct memory */
> adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
> adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
> if (adapter->tx_rings == NULL) {
2167,2170c2072,2075
< /* Next allocate the RX */
< if (!(adapter->rx_rings =
< (struct rx_ring *) malloc(sizeof(struct rx_ring) *
< adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
---
> /* Third, allocate the RX ring */
> adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
> adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
> if (adapter->rx_rings == NULL) {
2177,2178c2082,2083
< tsize = roundup2(adapter->num_tx_desc *
< sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
---
> tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
> DBA_ALIGN);
2180,2185d2084
< #ifdef PCI_IOV
< iov_mode = ixgbe_get_iov_mode(adapter);
< adapter->pool = ixgbe_max_vfs(iov_mode);
< #else
< adapter->pool = 0;
< #endif
2190c2089
< */
---
> */
2195,2199c2094,2097
< #ifdef PCI_IOV
< txr->me = ixgbe_pf_que_index(iov_mode, i);
< #else
< txr->me = i;
< #endif
---
> txr->br = NULL;
> /* In case SR-IOV is enabled, align the index properly */
> txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
> i);
2207,2208c2105,2106
< if (ixgbe_dma_malloc(adapter, tsize,
< &txr->txdma, BUS_DMA_NOWAIT)) {
---
> if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
> BUS_DMA_NOWAIT)) {
2217,2218c2115,2116
< /* Now allocate transmit buffers for the ring */
< if (ixgbe_allocate_transmit_buffers(txr)) {
---
> /* Now allocate transmit buffers for the ring */
> if (ixgbe_allocate_transmit_buffers(txr)) {
2223,2234c2121,2132
< }
< #ifndef IXGBE_LEGACY_TX
< /* Allocate a buf ring */
< txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
< M_WAITOK, &txr->tx_mtx);
< if (txr->br == NULL) {
< device_printf(dev,
< "Critical Failure setting up buf ring\n");
< error = ENOMEM;
< goto err_tx_desc;
< }
< #endif
---
> }
> if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
> /* Allocate a buf ring */
> txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
> M_WAITOK, &txr->tx_mtx);
> if (txr->br == NULL) {
> device_printf(dev,
> "Critical Failure setting up buf ring\n");
> error = ENOMEM;
> goto err_tx_desc;
> }
> }
2239,2241c2137,2139
< */
< rsize = roundup2(adapter->num_rx_desc *
< sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
---
> */
> rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
> DBA_ALIGN);
2246,2250c2144,2146
< #ifdef PCI_IOV
< rxr->me = ixgbe_pf_que_index(iov_mode, i);
< #else
< rxr->me = i;
< #endif
---
> /* In case SR-IOV is enabled, align the index properly */
> rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
> i);
2258,2259c2154,2155
< if (ixgbe_dma_malloc(adapter, rsize,
< &rxr->rxdma, BUS_DMA_NOWAIT)) {
---
> if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
> BUS_DMA_NOWAIT)) {
2268c2164
< /* Allocate receive buffers for the ring*/
---
> /* Allocate receive buffers for the ring */
2278,2279c2174,2175
< ** Finally set up the queue holding structs
< */
---
> * Finally set up the queue holding structs
> */
2303c2199
< }
---
> } /* ixgbe_allocate_queues */
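
The recurring change in this diff is the removal of compile-time #ifdef guards (IXGBE_FDIR, DEV_NETMAP, RSS, IXGBE_LEGACY_TX) in favor of runtime checks against the adapter->feat_en bit mask, so one driver binary can gate features per adapter at attach time. Below is a minimal, self-contained sketch of that pattern; the ixgbe_demo_* names and the bit values chosen here are hypothetical stand-ins for illustration, not the driver's actual definitions.

/*
 * Illustrative sketch only: runtime feature gating in place of
 * compile-time #ifdef blocks.  Bit assignments are assumed.
 */
#include <stdint.h>
#include <stdbool.h>

#define IXGBE_FEATURE_FDIR   (1U << 0)   /* assumed bit value */
#define IXGBE_FEATURE_NETMAP (1U << 1)   /* assumed bit value */

struct ixgbe_demo_adapter {
	uint32_t feat_en;        /* features enabled for this adapter */
};

/*
 * Old style: code under #ifdef IXGBE_FDIR only exists when the
 * driver is built with that option.  New style: the code is always
 * compiled and guarded by a test like the one below.
 */
static bool
ixgbe_demo_fdir_enabled(const struct ixgbe_demo_adapter *adapter)
{
	return ((adapter->feat_en & IXGBE_FEATURE_FDIR) != 0);
}

Usage mirrors the hunks above, e.g. the ATR sampling path in ixgbe_xmit() now tests (adapter->feat_en & IXGBE_FEATURE_FDIR) before probing a packet instead of being compiled out entirely.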