/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/ixgbe/ix_txrx.c 341477 2018-12-04 17:40:56Z vmaffione $*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

/*
 * HW RSC control:
 *  This feature only works with
 *  IPv4, and only on 82599 and later.
 *  It also causes IP forwarding to fail,
 *  and unlike LRO that cannot be controlled
 *  by the stack. For all these reasons it is
 *  best left off, and no tunable interface is
 *  provided; enabling it requires recompiling
 *  with this set to TRUE.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the
 * sampling interval, in TX packets,
 * for the filter pool; with the
 * default of 20, every 20th packet
 * is probed.
 *
 * This feature can be disabled by
 * setting this to 0.
 */
static int atr_sample_rate = 20;
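/*
 * ixgbe_xmit() below counts packets per TX ring in txr->atr_count and calls
 * ixgbe_atr() once the count reaches atr_sample_rate, then resets the count,
 * so the sampling interval is per-ring rather than global.
 */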

/************************************************************************
 *  Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                                        struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                                     struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                                    struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
                                      struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (ENETDOWN);
	if (!adapter->link_active)
		return (ENETDOWN);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixgbe_xmit(txr, &m_head)) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter  *adapter = ifp->if_softc;
	struct ix_queue *que;
	struct tx_ring  *txr;
	int             i, err = 0;
	uint32_t        bucket_id;

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, it should be the
	 * same bucket that the current CPU is bound to.
	 */
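	/*
	 * Illustrative example (numbers are hypothetical): with
	 * num_queues = 8, an RSS-hashed mbuf whose bucket_id is 11
	 * goes to ring 11 % 8 = 3; a packet with a flowid but no
	 * usable bucket uses flowid % 8; everything else falls back
	 * to curcpu % 8.
	 */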
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0)) {
			i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id > adapter->num_queues)
				if_printf(ifp,
				    "bucket_id (%d) > num_queues (%d)\n",
				    bucket_id, adapter->num_queues);
#endif
		} else
			i = m->m_pkthdr.flowid % adapter->num_queues;
	} else
		i = curcpu % adapter->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & adapter->active_queues) == 0)
		i = ffsl(adapter->active_queues);

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &txr->txq_task);

	return (0);
} /* ixgbe_mq_start */

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *next;
	int            enqueued = 0, err = 0;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (ENETDOWN);
	if (txr->adapter->link_active == 0)
		return (ENETDOWN);

	/* Process the queue */
#if __FreeBSD_version < 901504
	next = drbr_dequeue(ifp, txr->br);
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
#else
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		err = ixgbe_xmit(txr, &next);
		if (err != 0) {
			if (next == NULL)
				drbr_advance(ifp, txr->br);
			else
				drbr_putback(ifp, txr->br, next);
#endif
			break;
		}
#if __FreeBSD_version >= 901504
		drbr_advance(ifp, txr->br);
#endif
		enqueued++;
#if __FreeBSD_version >= 1100036
		/*
		 * Count outgoing multicast frames on VF interfaces;
		 * the VF feature flag on the adapter tells us whether
		 * this ring belongs to a VF.
		 */
		if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
		    (next->m_flags & M_MCAST))
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
#if __FreeBSD_version < 901504
		next = drbr_dequeue(ifp, txr->br);
#endif
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
		ixgbe_txeof(txr);

	return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a taskqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg, int pending)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet   *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_qflush - Flush all ring buffers
 ************************************************************************/
void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;
	struct mbuf    *m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
} /* ixgbe_qflush */


/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter          *adapter = txr->adapter;
	struct ixgbe_tx_buf     *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct mbuf             *m_head;
	int                     i, j, error, nsegs;
	int                     first;
	u32                     olinfo_status = 0, cmd_type_len;
	bool                    remap = TRUE;
	bus_dma_segment_t       segs[adapter->num_segs];
	bus_dmamap_t            map;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * It is important to capture the first descriptor
	 * used, because it will contain the index of the
	 * descriptor we tell the hardware to report back on.
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
retry:
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs,
	    &nsegs, BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		struct mbuf *m;

		switch (error) {
		case EFBIG:
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				/*
				 * XXX: m_defrag will choke on
				 * non-MCLBYTES-sized clusters
				 */
				m = m_defrag(*m_headp, M_NOWAIT);
				if (m == NULL) {
					adapter->mbuf_defrag_failed++;
					m_freem(*m_headp);
					*m_headp = NULL;
					return (ENOBUFS);
				}
				*m_headp = m;
				goto retry;
			} else
				return (error);
		case ENOMEM:
			txr->no_tx_dma_setup++;
			return (error);
		default:
			txr->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	}

	/* Make certain there are enough descriptors */
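	/*
	 * We need one data descriptor per DMA segment plus the context
	 * descriptor set up by ixgbe_tx_ctx_setup() below (nsegs + 1);
	 * checking against nsegs + 2 keeps one descriptor of headroom.
	 */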
	if (txr->tx_avail < (nsegs + 2)) {
		txr->no_desc_avail++;
		bus_dmamap_unload(txr->txtag, map);
		return (ENOBUFS);
	}
	m_head = *m_headp;

	/*
	 * Set up the appropriate offload context;
	 * this will consume the first descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		if (error == ENOBUFS)
			*m_headp = NULL;
		return (error);
	}

	/* Do the flow director magic */
	if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
	    (txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}

	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the map so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this descriptor.
	 */
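	/*
	 * The swap matters because ixgbe_txeof() syncs and unloads
	 * buf->map for the buffer holding m_head; after the swap that
	 * buffer owns the map that actually carries this packet's
	 * DMA load.
	 */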
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return (0);
} /* ixgbe_xmit */


/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach; setup is done on every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	device_t            dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int                 error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	error = bus_dma_tag_create(
	         /*      parent */ bus_get_dma_tag(adapter->dev),
	         /*   alignment */ 1,
	         /*      bounds */ 0,
	         /*     lowaddr */ BUS_SPACE_MAXADDR,
	         /*    highaddr */ BUS_SPACE_MAXADDR,
	         /*      filter */ NULL,
	         /*   filterarg */ NULL,
	         /*     maxsize */ IXGBE_TSO_SIZE,
	         /*   nsegments */ adapter->num_segs,
	         /*  maxsegsize */ PAGE_SIZE,
	         /*       flags */ 0,
	         /*    lockfunc */ NULL,
	         /* lockfuncarg */ NULL,
	                           &txr->txtag);
	if (error != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	txr->tx_buffers =
	    (struct ixgbe_tx_buf *)malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->tx_buffers == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixgbe_free_transmit_structures(adapter);

	return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter        *adapter = txr->adapter;
	struct ixgbe_tx_buf   *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot    *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
		/*
		 * (under lock): if in netmap mode, do some consistency
		 * checks and set slot to entry 0 of the netmap ring.
		 */
		slot = netmap_reset(na, NR_TX, txr->me, 0);
	}
#endif /* DEV_NETMAP */

	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
			int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */

		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

	/* Set the rate at which we sample packets */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		txr->atr_sample = atr_sample_rate;

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_UNLOCK(txr);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int                 i;

	INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag, tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag, tx_buffer->map);
			bus_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET
	struct ip                        *ip;
#endif
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
	int                              ehdrlen, ip_hlen = 0;
	int                              offload = TRUE;
	int                              ctxd = txr->next_avail_desc;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u16                              vtag = 0;
	u16                              etype;
	u8                               ipproto = 0;
	caddr_t                          l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO))
		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
	           (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, caddr_t);
	else
		l3d = mtod(mp, caddr_t) + ehdrlen;

	switch (etype) {
#ifdef INET
		case ETHERTYPE_IP:
			ip = (struct ip *)(l3d);
			ip_hlen = ip->ip_hl << 2;
			ipproto = ip->ip_p;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
			/* Insert IPv4 checksum into data descriptors */
			if (mp->m_pkthdr.csum_flags & CSUM_IP) {
				ip->ip_sum = 0;
				*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			}
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(l3d);
			ip_hlen = sizeof(struct ip6_hdr);
			ipproto = ip6->ip6_nxt;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
			break;
#endif
		default:
			offload = FALSE;
			break;
	}

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
		case IPPROTO_TCP:
			if (mp->m_pkthdr.csum_flags &
			    (CSUM_IP_TCP | CSUM_IP6_TCP))
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			else
				offload = false;
			break;
		case IPPROTO_UDP:
			if (mp->m_pkthdr.csum_flags &
			    (CSUM_IP_UDP | CSUM_IP6_UDP))
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
			else
				offload = false;
			break;
		case IPPROTO_SCTP:
			if (mp->m_pkthdr.csum_flags &
			    (CSUM_IP_SCTP | CSUM_IP6_SCTP))
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			else
				offload = false;
			break;
		default:
			offload = false;
			break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
#ifdef INET
	struct ip                        *ip;
#endif
	struct tcphdr                    *th;
	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u32                              mss_l4len_idx = 0, paylen;
	u16                              vtag = 0, eh_type;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	ctxd = txr->next_avail_desc;
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->num_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx;

	return (0);
} /* ixgbe_tso_setup */


/************************************************************************
 * ixgbe_txeof
 *
 *   Examine each tx_buffer in the used queue. If the hardware is done
 *   processing the packet then free associated resources. The
 *   tx_buffer is put back on the free queue.
 ************************************************************************/
void
ixgbe_txeof(struct tx_ring *txr)
{
	struct adapter          *adapter = txr->adapter;
	struct ixgbe_tx_buf     *buf;
	union ixgbe_adv_tx_desc *txd;
	u32                     work, processed = 0;
	u32                     limit = adapter->tx_process_limit;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

#ifdef DEV_NETMAP
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = na->tx_rings[txr->me];
		txd = txr->tx_base;
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * In netmap mode, all the work is done in the context
		 * of the client thread. Interrupt handlers only wake up
		 * clients, which may be sleeping on individual rings
		 * or on a global resource for all rings.
		 * To implement tx interrupt mitigation, we wake up the client
		 * thread roughly every half ring, even if the NIC interrupts
		 * more frequently. This is implemented as follows:
		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
		 *   the slot that should wake up the thread (nkr_num_slots
		 *   means the user thread should not be woken up);
		 * - the driver ignores tx interrupts unless netmap_mitigate=0
		 *   or the slot has the DD bit set.
		 */
		if (!netmap_mitigate ||
		    (kring->nr_kflags < kring->nkr_num_slots &&
		     txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
			netmap_tx_irq(adapter->ifp, txr->me);
		}
		return;
	}
#endif /* DEV_NETMAP */

	if (txr->tx_avail == txr->num_desc) {
		txr->busy = 0;
		return;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	work -= txr->num_desc; /* The distance to ring end */
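	/*
	 * Note: "work" is kept as a negative offset from the end of the
	 * ring; it reaches zero exactly at the wrap point, which is what
	 * the "!work" tests below rely on, and num_desc is added back at
	 * the end to recover a normal index.
	 */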
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	do {
		union ixgbe_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
			break;	/* I/O not complete */

		if (buf->m_head) {
			txr->bytes += buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
		buf->eop = NULL;
		++txr->tx_avail;

		/* We clean the range if multi segment */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes += buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag, buf->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag, buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			++txr->tx_avail;
			buf->eop = NULL;
		}
		++txr->packets;
		++processed;

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	 * Queue hang detection: we know there is work
	 * outstanding or the early return above would have
	 * been taken, so increment busy if nothing managed
	 * to get cleaned; local_timer then checks the count
	 * and marks the queue HUNG once it exceeds the
	 * maximum number of attempts.
	 */
	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
		++txr->busy;
	/*
	 * If anything was cleaned we reset the state to 1;
	 * note this will turn off HUNG if it's set.
	 */
	if (processed)
		txr->busy = 1;

	if (txr->tx_avail == txr->num_desc)
		txr->busy = 0;

	return;
} /* ixgbe_txeof */

/************************************************************************
 * ixgbe_rsc_count
 *
 *   Used to detect a descriptor that has been merged by Hardware RSC.
 ************************************************************************/
static inline u32
ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
	return (le32toh(rx->wb.lower.lo_dword.data) &
	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
} /* ixgbe_rsc_count */

/************************************************************************
 * ixgbe_setup_hw_rsc
 *
 *   Initialize Hardware RSC (LRO) feature on 82599
 *   for an RX ring, this is toggled by the LRO capability
 *   even though it is transparent to the stack.
 *
 *   NOTE: Since this HW feature only works with IPv4 and
 *         testing has shown soft LRO to be as effective,
 *         this feature will be disabled by default.
 ************************************************************************/
static void
ixgbe_setup_hw_rsc(struct rx_ring *rxr)
{
	struct adapter  *adapter = rxr->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             rscctrl, rdrxctl;

	/* If turning LRO/RSC off we need to disable it */
	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
		/* Write RSCCTL back with RSCEN cleared so the hardware
		 * actually disables RSC on this ring. */
		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
		return;
	}

	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
#ifdef DEV_NETMAP
	/* Always strip CRC unless Netmap disabled it */
	if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
	    !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
	    ix_crcstrip)
#endif /* DEV_NETMAP */
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * Limit the total number of descriptors that
	 * can be combined, so it does not exceed 64K
	 */
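	/*
	 * For reference (cluster sizes assumed from the usual FreeBSD
	 * values): 16 x MCLBYTES (2 KB) = 32 KB, 8 x MJUMPAGESIZE
	 * (4 KB pages) = 32 KB, 4 x MJUM9BYTES (9 KB) = 36 KB, and a
	 * single 16 KB cluster, all comfortably under the 64 KB limit.
	 */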
	if (rxr->mbuf_sz == MCLBYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	else if (rxr->mbuf_sz == MJUMPAGESIZE)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
	else if (rxr->mbuf_sz == MJUM9BYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	else  /* Using 16K cluster */
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;

	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);

	/* Enable TCP header recognition */
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));

	/* Disable RSC for ACK packets */
	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));

	rxr->hw_rsc = TRUE;
} /* ixgbe_setup_hw_rsc */

/************************************************************************
 * ixgbe_refresh_mbufs
 *
 *   Refresh mbuf buffers for RX descriptor rings.
 *    - Now keeps its own state, so discards due to resource
 *      exhaustion are unnecessary. If an mbuf cannot be obtained
 *      it just returns, keeping its placeholder; thus it can simply
 *      be called again later to retry.
 ************************************************************************/
static void
ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter      *adapter = rxr->adapter;
	struct ixgbe_rx_buf *rxbuf;
	struct mbuf         *mp;
	bus_dma_segment_t   seg[1];
	int                 i, j, nsegs, error;
	bool                refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Control the loop with one beyond */
	if (++j == rxr->num_desc)
		j = 0;

	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->buf == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
			    rxr->mbuf_sz);
			if (mp == NULL)
				goto update;
			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
		} else
			mp = rxbuf->buf;

		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;

		/* If we're dealing with an mbuf that was copied rather
		 * than replaced, there's no need to go through busdma.
		 */
		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
			/* Get the memory mapping */
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap,
			    mp, seg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
				m_free(mp);
				rxbuf->buf = NULL;
				goto update;
			}
			rxbuf->buf = mp;
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
			    htole64(seg[0].ds_addr);
		} else {
			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
			rxbuf->flags &= ~IXGBE_RX_COPY;
		}

		refreshed = TRUE;
		/* Next is precalculated */
		i = j;
		rxr->next_to_refresh = i;
		if (++j == rxr->num_desc)
			j = 0;
	}

update:
	if (refreshed) /* Update hardware tail index */
		IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);

	return;
} /* ixgbe_refresh_mbufs */

/************************************************************************
 * ixgbe_allocate_receive_buffers
 *
 *   Allocate memory for rx_buffer structures. Since we use one
 *   rx_buffer per received packet, the maximum number of rx_buffers
 *   that we'll need is equal to the number of receive descriptors
 *   that we've allocated.
1282320897Serj ************************************************************************/
1283320897Serjstatic int
1284280182Sjfvixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1285280182Sjfv{
1286320897Serj	struct adapter      *adapter = rxr->adapter;
1287320897Serj	device_t            dev = adapter->dev;
1288320897Serj	struct ixgbe_rx_buf *rxbuf;
1289320897Serj	int                 bsize, error;
1290280182Sjfv
1291280182Sjfv	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1292320897Serj	rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
1293320897Serj	    M_NOWAIT | M_ZERO);
1294320897Serj	if (rxr->rx_buffers == NULL) {
1295280182Sjfv		device_printf(dev, "Unable to allocate rx_buffer memory\n");
1296280182Sjfv		error = ENOMEM;
1297280182Sjfv		goto fail;
1298280182Sjfv	}
1299280182Sjfv
1300320897Serj	error = bus_dma_tag_create(
1301320897Serj	         /*      parent */ bus_get_dma_tag(dev),
1302320897Serj	         /*   alignment */ 1,
1303320897Serj	         /*      bounds */ 0,
1304320897Serj	         /*     lowaddr */ BUS_SPACE_MAXADDR,
1305320897Serj	         /*    highaddr */ BUS_SPACE_MAXADDR,
1306320897Serj	         /*      filter */ NULL,
1307320897Serj	         /*   filterarg */ NULL,
1308320897Serj	         /*     maxsize */ MJUM16BYTES,
1309320897Serj	         /*   nsegments */ 1,
1310320897Serj	         /*  maxsegsize */ MJUM16BYTES,
1311320897Serj	         /*       flags */ 0,
1312320897Serj	         /*    lockfunc */ NULL,
1313320897Serj	         /* lockfuncarg */ NULL,
1314320897Serj	                           &rxr->ptag);
1315320897Serj	if (error != 0) {
1316280182Sjfv		device_printf(dev, "Unable to create RX DMA tag\n");
1317280182Sjfv		goto fail;
1318280182Sjfv	}
1319280182Sjfv
1320283883Sjfv	for (int i = 0; i < rxr->num_desc; i++) {
1321280182Sjfv		rxbuf = &rxr->rx_buffers[i];
1322280962Serj		error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1323280182Sjfv		if (error) {
1324280182Sjfv			device_printf(dev, "Unable to create RX dma map\n");
1325280182Sjfv			goto fail;
1326280182Sjfv		}
1327280182Sjfv	}
1328280182Sjfv
1329280182Sjfv	return (0);
1330280182Sjfv
1331280182Sjfvfail:
1332280182Sjfv	/* Frees all, but can handle partial completion */
1333280182Sjfv	ixgbe_free_receive_structures(adapter);
1334320897Serj
1335280182Sjfv	return (error);
1336320897Serj} /* ixgbe_allocate_receive_buffers */
1337280182Sjfv
1338320897Serj/************************************************************************
1339320897Serj * ixgbe_free_receive_ring
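 *
 *   Discard every buffer on the ring: free the mbufs and unload their
 *   DMA maps so the ring can be re-initialized or torn down.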
1340320897Serj ************************************************************************/
1341320897Serjstatic void
1342280182Sjfvixgbe_free_receive_ring(struct rx_ring *rxr)
1343320897Serj{
1344283883Sjfv	for (int i = 0; i < rxr->num_desc; i++) {
1345320897Serj		ixgbe_rx_discard(rxr, i);
1346280182Sjfv	}
1347320897Serj} /* ixgbe_free_receive_ring */
1348280182Sjfv
1349320897Serj/************************************************************************
1350320897Serj * ixgbe_setup_receive_ring
1351280182Sjfv *
1352320897Serj *   Initialize a receive ring and its buffers.
1353320897Serj ************************************************************************/
1354280182Sjfvstatic int
1355280182Sjfvixgbe_setup_receive_ring(struct rx_ring *rxr)
1356280182Sjfv{
1357320897Serj	struct adapter        *adapter;
1358320897Serj	struct ifnet          *ifp;
1359320897Serj	device_t              dev;
1360320897Serj	struct ixgbe_rx_buf   *rxbuf;
1361320897Serj	struct lro_ctrl       *lro = &rxr->lro;
1362280182Sjfv#ifdef DEV_NETMAP
1363280182Sjfv	struct netmap_adapter *na = NA(rxr->adapter->ifp);
1364320897Serj	struct netmap_slot    *slot;
1365280182Sjfv#endif /* DEV_NETMAP */
1366320897Serj	bus_dma_segment_t     seg[1];
1367320897Serj	int                   rsize, nsegs, error = 0;
1368280182Sjfv
1369280182Sjfv	adapter = rxr->adapter;
1370280182Sjfv	ifp = adapter->ifp;
1371280182Sjfv	dev = adapter->dev;
1372280182Sjfv
1373280182Sjfv	/* Clear the ring contents */
1374280182Sjfv	IXGBE_RX_LOCK(rxr);
1375320897Serj
1376280182Sjfv#ifdef DEV_NETMAP
1377320897Serj	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1378320897Serj		slot = netmap_reset(na, NR_RX, rxr->me, 0);
1379280182Sjfv#endif /* DEV_NETMAP */
1380320897Serj
1381280182Sjfv	rsize = roundup2(adapter->num_rx_desc *
1382280182Sjfv	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1383280182Sjfv	bzero((void *)rxr->rx_base, rsize);
1384280182Sjfv	/* Cache the size */
1385280182Sjfv	rxr->mbuf_sz = adapter->rx_mbuf_sz;
1386280182Sjfv
1387280182Sjfv	/* Free current RX buffer structs and their mbufs */
1388280182Sjfv	ixgbe_free_receive_ring(rxr);
1389280182Sjfv
1390280182Sjfv	/* Now replenish the mbufs */
1391280182Sjfv	for (int j = 0; j != rxr->num_desc; ++j) {
1392320897Serj		struct mbuf *mp;
1393280182Sjfv
1394280182Sjfv		rxbuf = &rxr->rx_buffers[j];
1395320897Serj
1396280182Sjfv#ifdef DEV_NETMAP
1397280182Sjfv		/*
1398280182Sjfv		 * In netmap mode, fill the map and set the buffer
1399280182Sjfv		 * address in the NIC ring, considering the offset
1400280182Sjfv		 * between the netmap and NIC rings (see comment in
1401280182Sjfv		 * ixgbe_setup_transmit_ring() ). No need to allocate
1402280182Sjfv		 * an mbuf, so end the block with a continue;
1403280182Sjfv		 */
1404320897Serj		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1405341477Svmaffione			int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
1406280182Sjfv			uint64_t paddr;
1407280182Sjfv			void *addr;
1408280182Sjfv
1409280182Sjfv			addr = PNMB(na, slot + sj, &paddr);
1410280182Sjfv			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1411280182Sjfv			/* Update descriptor and the cached value */
1412280182Sjfv			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1413280182Sjfv			rxbuf->addr = htole64(paddr);
1414280182Sjfv			continue;
1415280182Sjfv		}
1416280182Sjfv#endif /* DEV_NETMAP */
1417320897Serj
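		/*
		 * Allocate a packet-header mbuf backed by a cluster of
		 * rx_mbuf_sz bytes (m_getjcl() below).
		 */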
1418320897Serj		rxbuf->flags = 0;
1419320897Serj		rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1420320897Serj		    adapter->rx_mbuf_sz);
1421280182Sjfv		if (rxbuf->buf == NULL) {
1422280182Sjfv			error = ENOBUFS;
1423320897Serj			goto fail;
1424280182Sjfv		}
1425280182Sjfv		mp = rxbuf->buf;
1426280182Sjfv		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1427280182Sjfv		/* Get the memory mapping */
1428320897Serj		error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, seg,
1429280182Sjfv		    &nsegs, BUS_DMA_NOWAIT);
1430280182Sjfv		if (error != 0)
1431320897Serj			goto fail;
1432320897Serj		bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD);
1433280182Sjfv		/* Update the descriptor and the cached value */
1434280182Sjfv		rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
1435280182Sjfv		rxbuf->addr = htole64(seg[0].ds_addr);
1436280182Sjfv	}
1437280182Sjfv
1439280182Sjfv	/* Setup our descriptor indices */
1440280182Sjfv	rxr->next_to_check = 0;
1441280182Sjfv	rxr->next_to_refresh = 0;
1442280182Sjfv	rxr->lro_enabled = FALSE;
1443280182Sjfv	rxr->rx_copies = 0;
1444280182Sjfv	rxr->rx_bytes = 0;
1445280182Sjfv	rxr->vtag_strip = FALSE;
1446280182Sjfv
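	/* Make the freshly initialized descriptors visible to the hardware */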
1447280182Sjfv	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1448280182Sjfv	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1449280182Sjfv
1450280182Sjfv	/*
1451320897Serj	 * Now set up the LRO interface
1452320897Serj	 */
1453280182Sjfv	if (ixgbe_rsc_enable)
1454280182Sjfv		ixgbe_setup_hw_rsc(rxr);
1455280182Sjfv	else if (ifp->if_capenable & IFCAP_LRO) {
1456280182Sjfv		int err = tcp_lro_init(lro);
1457280182Sjfv		if (err) {
1458280182Sjfv			device_printf(dev, "LRO Initialization failed!\n");
1459280182Sjfv			goto fail;
1460280182Sjfv		}
1461280182Sjfv		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1462280182Sjfv		rxr->lro_enabled = TRUE;
1463280182Sjfv		lro->ifp = adapter->ifp;
1464280182Sjfv	}
1465280182Sjfv
1466280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1467320897Serj
1468280182Sjfv	return (0);
1469280182Sjfv
1470280182Sjfvfail:
1471280182Sjfv	ixgbe_free_receive_ring(rxr);
1472280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1473320897Serj
1474280182Sjfv	return (error);
1475320897Serj} /* ixgbe_setup_receive_ring */
1476280182Sjfv
1477320897Serj/************************************************************************
1478320897Serj * ixgbe_setup_receive_structures - Initialize all receive rings.
1479320897Serj ************************************************************************/
1480280182Sjfvint
1481280182Sjfvixgbe_setup_receive_structures(struct adapter *adapter)
1482280182Sjfv{
1483280182Sjfv	struct rx_ring *rxr = adapter->rx_rings;
1484320897Serj	int            j;
1485280182Sjfv
1486280182Sjfv	for (j = 0; j < adapter->num_queues; j++, rxr++)
1487280182Sjfv		if (ixgbe_setup_receive_ring(rxr))
1488280182Sjfv			goto fail;
1489280182Sjfv
1490280182Sjfv	return (0);
1491280182Sjfvfail:
1492280182Sjfv	/*
1493280182Sjfv	 * Free RX buffers allocated so far; we will only handle
1494280182Sjfv	 * the rings that completed.  The failing case will have
1495280182Sjfv	 * cleaned up for itself, so 'j' is the terminus.
1496280182Sjfv	 */
1497280182Sjfv	for (int i = 0; i < j; ++i) {
1498280182Sjfv		rxr = &adapter->rx_rings[i];
1499320897Serj		IXGBE_RX_LOCK(rxr);
1500280182Sjfv		ixgbe_free_receive_ring(rxr);
1501320897Serj		IXGBE_RX_UNLOCK(rxr);
1502280182Sjfv	}
1503280182Sjfv
1504280182Sjfv	return (ENOBUFS);
1505320897Serj} /* ixgbe_setup_receive_structures */
1506280182Sjfv
1507280182Sjfv
1508320897Serj/************************************************************************
1509320897Serj * ixgbe_free_receive_structures - Free all receive rings.
1510320897Serj ************************************************************************/
1511280182Sjfvvoid
1512280182Sjfvixgbe_free_receive_structures(struct adapter *adapter)
1513280182Sjfv{
1514280182Sjfv	struct rx_ring *rxr = adapter->rx_rings;
1515280182Sjfv
1516280182Sjfv	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1517280182Sjfv
1518280182Sjfv	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1519280182Sjfv		ixgbe_free_receive_buffers(rxr);
1520280182Sjfv		/* Free LRO memory */
1521320897Serj		tcp_lro_free(&rxr->lro);
1522280182Sjfv		/* Free the ring memory as well */
1523280182Sjfv		ixgbe_dma_free(adapter, &rxr->rxdma);
1524280182Sjfv	}
1525280182Sjfv
1526280182Sjfv	free(adapter->rx_rings, M_DEVBUF);
1527320897Serj} /* ixgbe_free_receive_structures */
1528280182Sjfv
1529280182Sjfv
1530320897Serj/************************************************************************
1531320897Serj * ixgbe_free_receive_buffers - Free receive ring data structures
1532320897Serj ************************************************************************/
1533320897Serjstatic void
1534280182Sjfvixgbe_free_receive_buffers(struct rx_ring *rxr)
1535280182Sjfv{
1536320897Serj	struct adapter      *adapter = rxr->adapter;
1537320897Serj	struct ixgbe_rx_buf *rxbuf;
1538280182Sjfv
1539280182Sjfv	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1540280182Sjfv
1541280182Sjfv	/* Cleanup any existing buffers */
1542280182Sjfv	if (rxr->rx_buffers != NULL) {
1543280182Sjfv		for (int i = 0; i < adapter->num_rx_desc; i++) {
1544280182Sjfv			rxbuf = &rxr->rx_buffers[i];
1545320897Serj			ixgbe_rx_discard(rxr, i);
1546280182Sjfv			if (rxbuf->pmap != NULL) {
1547280182Sjfv				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1548280182Sjfv				rxbuf->pmap = NULL;
1549280182Sjfv			}
1550280182Sjfv		}
1551280182Sjfv		if (rxr->rx_buffers != NULL) {
1552280182Sjfv			free(rxr->rx_buffers, M_DEVBUF);
1553280182Sjfv			rxr->rx_buffers = NULL;
1554280182Sjfv		}
1555280182Sjfv	}
1556280182Sjfv
1557280182Sjfv	if (rxr->ptag != NULL) {
1558280182Sjfv		bus_dma_tag_destroy(rxr->ptag);
1559280182Sjfv		rxr->ptag = NULL;
1560280182Sjfv	}
1561280182Sjfv
1562280182Sjfv	return;
1563320897Serj} /* ixgbe_free_receive_buffers */
1564280182Sjfv
1565320897Serj/************************************************************************
1566320897Serj * ixgbe_rx_input
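 *
 *   Pass a received packet up to the network stack, attempting LRO
 *   first when it is enabled and the packet is eligible.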
1567320897Serj ************************************************************************/
1568280182Sjfvstatic __inline void
1569320897Serjixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1570320897Serj    u32 ptype)
1571280182Sjfv{
1572320897Serj	/*
1573320897Serj	 * At the moment LRO is only for IPv4/IPv6 TCP packets whose TCP
1574320897Serj	 * checksum has been verified by hardware and which carry no VLAN tag
1575320897Serj	 * in the Ethernet header.  IPv6 extension headers are not yet supported.
1576320897Serj	 */
1577320897Serj	if (rxr->lro_enabled &&
1578320897Serj	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1579320897Serj	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1580320897Serj	    ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1581320897Serj	     (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1582320897Serj	     (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1583320897Serj	     (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1584320897Serj	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1585320897Serj	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1586320897Serj		/*
1587320897Serj		 * Hand the packet to LRO; if there are no LRO resources
1588320897Serj		 * or the enqueue fails, fall through and send it to the
1589320897Serj		 * stack below.
1590320897Serj		 */
1592320897Serj		if (rxr->lro.lro_cnt != 0)
1593320897Serj			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1594320897Serj				return;
1595320897Serj	}
1596280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1597320897Serj	(*ifp->if_input)(ifp, m);
1598280182Sjfv	IXGBE_RX_LOCK(rxr);
1599320897Serj} /* ixgbe_rx_input */
1600280182Sjfv
1601320897Serj/************************************************************************
1602320897Serj * ixgbe_rx_discard
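 *
 *   Free any mbuf chain attached to the given descriptor slot and
 *   unload its DMA map, leaving the slot ready for a refresh.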
1603320897Serj ************************************************************************/
1604280182Sjfvstatic __inline void
1605280182Sjfvixgbe_rx_discard(struct rx_ring *rxr, int i)
1606280182Sjfv{
1607320897Serj	struct ixgbe_rx_buf *rbuf;
1608280182Sjfv
1609280182Sjfv	rbuf = &rxr->rx_buffers[i];
1610280182Sjfv
1611280182Sjfv	/*
1612320897Serj	 * With advanced descriptors the writeback clobbers the buffer
1613320897Serj	 * addresses, so it's easier to just free the existing mbufs and
1614320897Serj	 * take the normal refresh path to get new buffers and mappings.
1617320897Serj	 */
1618280182Sjfv
1619280182Sjfv	if (rbuf->fmp != NULL) {/* Partial chain ? */
1620320897Serj		bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1621280182Sjfv		m_freem(rbuf->fmp);
1622280182Sjfv		rbuf->fmp = NULL;
1623280182Sjfv		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1624280182Sjfv	} else if (rbuf->buf) {
1625320897Serj		bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1626280182Sjfv		m_free(rbuf->buf);
1627280182Sjfv		rbuf->buf = NULL;
1628280182Sjfv	}
1629280962Serj	bus_dmamap_unload(rxr->ptag, rbuf->pmap);
1630280182Sjfv
1631280182Sjfv	rbuf->flags = 0;
1632320897Serj
1633280182Sjfv	return;
1634320897Serj} /* ixgbe_rx_discard */
1635280182Sjfv
1636280182Sjfv
1637320897Serj/************************************************************************
1638320897Serj * ixgbe_rxeof
1639280182Sjfv *
1640320897Serj *   Executes in interrupt context.  Replenishes the mbufs in the
1641320897Serj *   descriptor ring and sends data that has been DMA'd into host
1642320897Serj *   memory to the upper layer.
1643280182Sjfv *
1644320897Serj *   Return TRUE for more work, FALSE for all clean.
1645320897Serj ************************************************************************/
1646280182Sjfvbool
1647280182Sjfvixgbe_rxeof(struct ix_queue *que)
1648280182Sjfv{
1649320897Serj	struct adapter          *adapter = que->adapter;
1650320897Serj	struct rx_ring          *rxr = que->rxr;
1651320897Serj	struct ifnet            *ifp = adapter->ifp;
1652320897Serj	struct lro_ctrl         *lro = &rxr->lro;
1653320897Serj	union ixgbe_adv_rx_desc *cur;
1654320897Serj	struct ixgbe_rx_buf     *rbuf, *nbuf;
1655320897Serj	int                     i, nextp, processed = 0;
1656320897Serj	u32                     staterr = 0;
1657320897Serj	u32                     count = adapter->rx_process_limit;
1658320897Serj	u16                     pkt_info;
1659280182Sjfv
1660280182Sjfv	IXGBE_RX_LOCK(rxr);
1661280182Sjfv
1662280182Sjfv#ifdef DEV_NETMAP
1663320897Serj	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1664320897Serj		/* Same as the txeof routine: wakeup clients on intr. */
1665320897Serj		if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1666320897Serj			IXGBE_RX_UNLOCK(rxr);
1667320897Serj			return (FALSE);
1668320897Serj		}
1669280182Sjfv	}
1670280182Sjfv#endif /* DEV_NETMAP */
1671280182Sjfv
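	/*
	 * Main receive loop: walk the ring from next_to_check, stopping
	 * when the DD (descriptor done) bit is clear or the per-call
	 * processing limit (count) has been exhausted.
	 */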
1672280182Sjfv	for (i = rxr->next_to_check; count != 0;) {
1673320897Serj		struct mbuf *sendmp, *mp;
1674320897Serj		u32         rsc, ptype;
1675320897Serj		u16         len;
1676320897Serj		u16         vtag = 0;
1677320897Serj		bool        eop;
1678320897Serj
1679280182Sjfv		/* Sync the ring. */
1680280182Sjfv		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1681280182Sjfv		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1682280182Sjfv
1683280182Sjfv		cur = &rxr->rx_base[i];
1684280182Sjfv		staterr = le32toh(cur->wb.upper.status_error);
1685280182Sjfv		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1686280182Sjfv
1687280182Sjfv		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1688280182Sjfv			break;
1689280182Sjfv		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1690280182Sjfv			break;
1691280182Sjfv
1692280182Sjfv		count--;
1693280182Sjfv		sendmp = NULL;
1694280182Sjfv		nbuf = NULL;
1695280182Sjfv		rsc = 0;
1696280182Sjfv		cur->wb.upper.status_error = 0;
1697280182Sjfv		rbuf = &rxr->rx_buffers[i];
1698280182Sjfv		mp = rbuf->buf;
1699280182Sjfv
1700280182Sjfv		len = le16toh(cur->wb.upper.length);
1701280182Sjfv		ptype = le32toh(cur->wb.lower.lo_dword.data) &
1702280182Sjfv		    IXGBE_RXDADV_PKTTYPE_MASK;
1703280182Sjfv		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1704280182Sjfv
1705280182Sjfv		/* Make sure bad packets are discarded */
1706280182Sjfv		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1707280182Sjfv#if __FreeBSD_version >= 1100036
1708320897Serj			if (adapter->feat_en & IXGBE_FEATURE_VF)
1709282289Serj				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1710280182Sjfv#endif
1711280182Sjfv			rxr->rx_discarded++;
1712280182Sjfv			ixgbe_rx_discard(rxr, i);
1713280182Sjfv			goto next_desc;
1714280182Sjfv		}
1715280182Sjfv
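		/* Make the DMA'd packet data visible to the CPU */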
1716320897Serj		bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1717320897Serj
1718280182Sjfv		/*
1719320897Serj		 * On the 82599, which supports hardware LRO (called
1720320897Serj		 * HW RSC), packets need not be fragmented across
1721320897Serj		 * sequential descriptors; rather, the next descriptor
1722320897Serj		 * is indicated in bits of the current one.  This also
1723320897Serj		 * means we may process more than one packet at a time,
1724320897Serj		 * something that had never been true before and that
1725320897Serj		 * required eliminating the global chain pointers in
1726320897Serj		 * favor of what we are doing here.  -jfv
1729320897Serj		 */
1730280182Sjfv		if (!eop) {
1731280182Sjfv			/*
1732320897Serj			 * Figure out the next descriptor
1733320897Serj			 * of this frame.
1734320897Serj			 */
1735280182Sjfv			if (rxr->hw_rsc == TRUE) {
1736280182Sjfv				rsc = ixgbe_rsc_count(cur);
1737280182Sjfv				rxr->rsc_num += (rsc - 1);
1738280182Sjfv			}
1739280182Sjfv			if (rsc) { /* Get hardware index */
1740320897Serj				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1741280182Sjfv				    IXGBE_RXDADV_NEXTP_SHIFT);
1742280182Sjfv			} else { /* Just sequential */
1743280182Sjfv				nextp = i + 1;
1744280182Sjfv				if (nextp == adapter->num_rx_desc)
1745280182Sjfv					nextp = 0;
1746280182Sjfv			}
1747280182Sjfv			nbuf = &rxr->rx_buffers[nextp];
1748280182Sjfv			prefetch(nbuf);
1749280182Sjfv		}
1750280182Sjfv		/*
1751320897Serj		 * Rather than using the fmp/lmp global pointers
1752320897Serj		 * we now keep the head of a packet chain in the
1753320897Serj		 * buffer struct and pass this along from one
1754320897Serj		 * descriptor to the next, until we get EOP.
1755320897Serj		 */
1756280182Sjfv		mp->m_len = len;
1757280182Sjfv		/*
1758320897Serj		 * See if there is a stored head for this chain; if so,
1759320897Serj		 * this buffer is a secondary fragment of that packet.
1760320897Serj		 */
1761280182Sjfv		sendmp = rbuf->fmp;
1762280182Sjfv		if (sendmp != NULL) {  /* secondary frag */
1763280182Sjfv			rbuf->buf = rbuf->fmp = NULL;
1764280182Sjfv			mp->m_flags &= ~M_PKTHDR;
1765280182Sjfv			sendmp->m_pkthdr.len += mp->m_len;
1766280182Sjfv		} else {
1767280182Sjfv			/*
1768280182Sjfv			 * Optimize.  This might be a small packet,
1769280182Sjfv			 * maybe just a TCP ACK.  Do a fast copy that
1770280182Sjfv			 * is cache aligned into a new mbuf, and
1771280182Sjfv			 * leave the old mbuf+cluster for re-use.
1772280182Sjfv			 */
1773280182Sjfv			if (eop && len <= IXGBE_RX_COPY_LEN) {
1774280182Sjfv				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1775280182Sjfv				if (sendmp != NULL) {
1776320897Serj					sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1777320897Serj					ixgbe_bcopy(mp->m_data, sendmp->m_data,
1778320897Serj					    len);
1779280182Sjfv					sendmp->m_len = len;
1780280182Sjfv					rxr->rx_copies++;
1781280182Sjfv					rbuf->flags |= IXGBE_RX_COPY;
1782280182Sjfv				}
1783280182Sjfv			}
1784280182Sjfv			if (sendmp == NULL) {
1785280182Sjfv				rbuf->buf = rbuf->fmp = NULL;
1786280182Sjfv				sendmp = mp;
1787280182Sjfv			}
1788280182Sjfv
1789280182Sjfv			/* first desc of a non-ps chain */
1790280182Sjfv			sendmp->m_flags |= M_PKTHDR;
1791280182Sjfv			sendmp->m_pkthdr.len = mp->m_len;
1792280182Sjfv		}
1793280182Sjfv		++processed;
1794280182Sjfv
1795280182Sjfv		/* Pass the head pointer on */
1796280182Sjfv		if (eop == 0) {
1797280182Sjfv			nbuf->fmp = sendmp;
1798280182Sjfv			sendmp = NULL;
1799280182Sjfv			mp->m_next = nbuf->buf;
1800280182Sjfv		} else { /* Sending this frame */
1801280182Sjfv			sendmp->m_pkthdr.rcvif = ifp;
1802280182Sjfv			rxr->rx_packets++;
1803280182Sjfv			/* capture data for AIM */
1804280182Sjfv			rxr->bytes += sendmp->m_pkthdr.len;
1805280182Sjfv			rxr->rx_bytes += sendmp->m_pkthdr.len;
1806280182Sjfv			/* Process vlan info */
1807320897Serj			if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1808280182Sjfv				vtag = le16toh(cur->wb.upper.vlan);
1809280182Sjfv			if (vtag) {
1810280182Sjfv				sendmp->m_pkthdr.ether_vtag = vtag;
1811280182Sjfv				sendmp->m_flags |= M_VLANTAG;
1812280182Sjfv			}
1813280182Sjfv			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1814280182Sjfv				ixgbe_rx_checksum(staterr, sendmp, ptype);
1815285528Shiren
1816320897Serj			/*
1817320897Serj			 * In case of multiqueue, we have RXCSUM.PCSD bit set
1818320897Serj			 * and never cleared. This means we have RSS hash
1819320897Serj			 * available to be used.
1820320897Serj			 */
1821320897Serj			if (adapter->num_queues > 1) {
1822320897Serj				sendmp->m_pkthdr.flowid =
1823320897Serj				    le32toh(cur->wb.lower.hi_dword.rss);
1824320897Serj				switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1825320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV4:
1826320897Serj					M_HASHTYPE_SET(sendmp,
1827320897Serj					    M_HASHTYPE_RSS_IPV4);
1828320897Serj					break;
1829320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1830320897Serj					M_HASHTYPE_SET(sendmp,
1831320897Serj					    M_HASHTYPE_RSS_TCP_IPV4);
1832320897Serj					break;
1833320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV6:
1834320897Serj					M_HASHTYPE_SET(sendmp,
1835320897Serj					    M_HASHTYPE_RSS_IPV6);
1836320897Serj					break;
1837320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1838320897Serj					M_HASHTYPE_SET(sendmp,
1839320897Serj					    M_HASHTYPE_RSS_TCP_IPV6);
1840320897Serj					break;
1841320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1842320897Serj					M_HASHTYPE_SET(sendmp,
1843320897Serj					    M_HASHTYPE_RSS_IPV6_EX);
1844320897Serj					break;
1845320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1846320897Serj					M_HASHTYPE_SET(sendmp,
1847320897Serj					    M_HASHTYPE_RSS_TCP_IPV6_EX);
1848320897Serj					break;
1849292674Ssbruno#if __FreeBSD_version > 1100000
1850320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
1851320897Serj					M_HASHTYPE_SET(sendmp,
1852320897Serj					    M_HASHTYPE_RSS_UDP_IPV4);
1853320897Serj					break;
1854320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
1855320897Serj					M_HASHTYPE_SET(sendmp,
1856320897Serj					    M_HASHTYPE_RSS_UDP_IPV6);
1857320897Serj					break;
1858320897Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
1859320897Serj					M_HASHTYPE_SET(sendmp,
1860320897Serj					    M_HASHTYPE_RSS_UDP_IPV6_EX);
1861320897Serj					break;
1862292674Ssbruno#endif
1863320897Serj				default:
1864320897Serj					M_HASHTYPE_SET(sendmp,
1865320897Serj					    M_HASHTYPE_OPAQUE_HASH);
1866320897Serj				}
1867320897Serj			} else {
1868320897Serj				sendmp->m_pkthdr.flowid = que->msix;
1869280182Sjfv				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
1870280182Sjfv			}
1871280182Sjfv		}
1872280182Sjfvnext_desc:
1873280182Sjfv		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1874280182Sjfv		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1875280182Sjfv
1876280182Sjfv		/* Advance our pointers to the next descriptor. */
1877280182Sjfv		if (++i == rxr->num_desc)
1878280182Sjfv			i = 0;
1879280182Sjfv
1880280182Sjfv		/* Now send to the stack or do LRO */
1881280182Sjfv		if (sendmp != NULL) {
1882280182Sjfv			rxr->next_to_check = i;
1883280182Sjfv			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
1884280182Sjfv			i = rxr->next_to_check;
1885280182Sjfv		}
1886280182Sjfv
1887320897Serj		/* Refresh mbufs every eight processed descriptors */
1888280182Sjfv		if (processed == 8) {
1889280182Sjfv			ixgbe_refresh_mbufs(rxr, i);
1890280182Sjfv			processed = 0;
1891280182Sjfv		}
1892280182Sjfv	}
1893280182Sjfv
1894280182Sjfv	/* Refresh any remaining buf structs */
1895280182Sjfv	if (ixgbe_rx_unrefreshed(rxr))
1896280182Sjfv		ixgbe_refresh_mbufs(rxr, i);
1897280182Sjfv
1898280182Sjfv	rxr->next_to_check = i;
1899280182Sjfv
1900280182Sjfv	/*
1901280182Sjfv	 * Flush any outstanding LRO work
1902280182Sjfv	 */
1903297482Ssephe	tcp_lro_flush_all(lro);
1904280182Sjfv
1905280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1906280182Sjfv
1907280182Sjfv	/*
1908320897Serj	 * Still have cleaning to do?
1909320897Serj	 */
1910280182Sjfv	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
1911280182Sjfv		return (TRUE);
1912280182Sjfv
1913320897Serj	return (FALSE);
1914320897Serj} /* ixgbe_rxeof */
1915280182Sjfv
1916320897Serj
1917320897Serj/************************************************************************
1918320897Serj * ixgbe_rx_checksum
1919280182Sjfv *
1920320897Serj *   Verify that the hardware indicated that the checksum is valid.
1921320897Serj *   Inform the stack about the status of the checksum so that it
1922320897Serj *   doesn't spend time verifying the checksum itself.
1923320897Serj ************************************************************************/
1924280182Sjfvstatic void
1925280182Sjfvixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
1926280182Sjfv{
1927320897Serj	u16  status = (u16)staterr;
1928320897Serj	u8   errors = (u8)(staterr >> 24);
1929320897Serj	bool sctp = false;
1930280182Sjfv
1931280182Sjfv	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1932280182Sjfv	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
1933292674Ssbruno		sctp = true;
1934280182Sjfv
1935292674Ssbruno	/* IPv4 checksum */
1936280182Sjfv	if (status & IXGBE_RXD_STAT_IPCS) {
1937292674Ssbruno		mp->m_pkthdr.csum_flags |= CSUM_L3_CALC;
1938292674Ssbruno		/* IP Checksum Good */
1939292674Ssbruno		if (!(errors & IXGBE_RXD_ERR_IPE))
1940292674Ssbruno			mp->m_pkthdr.csum_flags |= CSUM_L3_VALID;
1941280182Sjfv	}
1942292674Ssbruno	/* TCP/UDP/SCTP checksum */
1943280182Sjfv	if (status & IXGBE_RXD_STAT_L4CS) {
1944292674Ssbruno		mp->m_pkthdr.csum_flags |= CSUM_L4_CALC;
1945280182Sjfv		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
1946292674Ssbruno			mp->m_pkthdr.csum_flags |= CSUM_L4_VALID;
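			/*
			 * TCP/UDP expect a pseudo-header checksum value in
			 * csum_data (0xffff means fully verified); SCTP uses
			 * CRC32c and has no pseudo-header checksum, so
			 * csum_data is left untouched for it.
			 */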
1947280182Sjfv			if (!sctp)
1948280182Sjfv				mp->m_pkthdr.csum_data = htons(0xffff);
1949292674Ssbruno		}
1950280182Sjfv	}
1951320897Serj} /* ixgbe_rx_checksum */
1952280182Sjfv
1953320897Serj/************************************************************************
1954320897Serj * ixgbe_dmamap_cb - bus_dmamap_load() callback; records the bus address.
1955320897Serj ************************************************************************/
1956280182Sjfvstatic void
1957280182Sjfvixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1958280182Sjfv{
1959280182Sjfv	if (error)
1960280182Sjfv		return;
1961320897Serj	*(bus_addr_t *)arg = segs->ds_addr;
1962320897Serj
1963280182Sjfv	return;
1964320897Serj} /* ixgbe_dmamap_cb */
1965280182Sjfv
1966320897Serj/************************************************************************
1967320897Serj * ixgbe_dma_malloc
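 *
 *   Allocate a DMA-safe block of memory: create a tag, allocate wired
 *   memory, and load the map, returning the bus address in
 *   dma->dma_paddr and the kernel virtual address in dma->dma_vaddr.
 *
 *   A minimal usage sketch, mirroring the ring allocations in this file:
 *
 *	struct ixgbe_dma_alloc dma;
 *	if (ixgbe_dma_malloc(adapter, tsize, &dma, BUS_DMA_NOWAIT) == 0) {
 *		... use dma.dma_vaddr / dma.dma_paddr ...
 *		ixgbe_dma_free(adapter, &dma);
 *	}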
1968320897Serj ************************************************************************/
1969320897Serjstatic int
1970280182Sjfvixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
1971320897Serj                 struct ixgbe_dma_alloc *dma, int mapflags)
1972280182Sjfv{
1973280182Sjfv	device_t dev = adapter->dev;
1974320897Serj	int      r;
1975280182Sjfv
1976320897Serj	r = bus_dma_tag_create(
1977320897Serj	     /*      parent */ bus_get_dma_tag(adapter->dev),
1978320897Serj	     /*   alignment */ DBA_ALIGN,
1979320897Serj	     /*      bounds */ 0,
1980320897Serj	     /*     lowaddr */ BUS_SPACE_MAXADDR,
1981320897Serj	     /*    highaddr */ BUS_SPACE_MAXADDR,
1982320897Serj	     /*      filter */ NULL,
1983320897Serj	     /*   filterarg */ NULL,
1984320897Serj	     /*     maxsize */ size,
1985320897Serj	     /*   nsegments */ 1,
1986320897Serj	     /*  maxsegsize */ size,
1987320897Serj	     /*       flags */ BUS_DMA_ALLOCNOW,
1988320897Serj	     /*    lockfunc */ NULL,
1989320897Serj	     /* lockfuncarg */ NULL,
1990320897Serj	                       &dma->dma_tag);
1991280182Sjfv	if (r != 0) {
1992320897Serj		device_printf(dev,
1993320897Serj		    "ixgbe_dma_malloc: bus_dma_tag_create failed; error %u\n",
1994320897Serj		    r);
1995280182Sjfv		goto fail_0;
1996280182Sjfv	}
1997280182Sjfv	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1998320897Serj	    BUS_DMA_NOWAIT, &dma->dma_map);
1999280182Sjfv	if (r != 0) {
2000320897Serj		device_printf(dev,
2001320897Serj		    "ixgbe_dma_malloc: bus_dmamem_alloc failed; error %u\n", r);
2002280182Sjfv		goto fail_1;
2003280182Sjfv	}
2004320897Serj	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
2005320897Serj	    ixgbe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2006280182Sjfv	if (r != 0) {
2007320897Serj		device_printf(dev,
2008320897Serj		    "ixgbe_dma_malloc: bus_dmamap_load failed; error %u\n", r);
2009280182Sjfv		goto fail_2;
2010280182Sjfv	}
2011280182Sjfv	dma->dma_size = size;
2012320897Serj
2013280182Sjfv	return (0);
2014280182Sjfvfail_2:
2015280182Sjfv	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2016280182Sjfvfail_1:
2017280182Sjfv	bus_dma_tag_destroy(dma->dma_tag);
2018280182Sjfvfail_0:
2019280182Sjfv	dma->dma_tag = NULL;
2020320897Serj
2021280182Sjfv	return (r);
2022320897Serj} /* ixgbe_dma_malloc */
2023280182Sjfv
2024320897Serj/************************************************************************
2025320897Serj * ixgbe_dma_free
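 *
 *   Release the map, memory, and tag allocated by ixgbe_dma_malloc().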
2026320897Serj ************************************************************************/
2027320897Serjstatic void
2028280182Sjfvixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2029280182Sjfv{
2030280182Sjfv	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2031280182Sjfv	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2032280182Sjfv	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2033280182Sjfv	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2034280182Sjfv	bus_dma_tag_destroy(dma->dma_tag);
2035320897Serj} /* ixgbe_dma_free */
2036280182Sjfv
2037280182Sjfv
2038320897Serj/************************************************************************
2039320897Serj * ixgbe_allocate_queues
2040280182Sjfv *
2041320897Serj *   Allocate memory for the transmit and receive rings, and then
2042320897Serj *   the descriptors associated with each, called only once at attach.
2043320897Serj ************************************************************************/
2044280182Sjfvint
2045280182Sjfvixgbe_allocate_queues(struct adapter *adapter)
2046280182Sjfv{
2047320897Serj	device_t        dev = adapter->dev;
2048320897Serj	struct ix_queue *que;
2049320897Serj	struct tx_ring  *txr;
2050320897Serj	struct rx_ring  *rxr;
2051320897Serj	int             rsize, tsize, error = IXGBE_SUCCESS;
2052320897Serj	int             txconf = 0, rxconf = 0;
2053280182Sjfv
2054320897Serj	/* First, allocate the top level queue structs */
2055320897Serj	adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2056320897Serj	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2057320897Serj	if (adapter->queues == NULL) {
2058320897Serj		device_printf(dev, "Unable to allocate queue memory\n");
2059320897Serj		error = ENOMEM;
2060320897Serj		goto fail;
2061320897Serj	}
2062280182Sjfv
2063320897Serj	/* Second, allocate the TX ring struct memory */
2064320897Serj	adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
2065320897Serj	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2066320897Serj	if (adapter->tx_rings == NULL) {
2067280182Sjfv		device_printf(dev, "Unable to allocate TX ring memory\n");
2068280182Sjfv		error = ENOMEM;
2069280182Sjfv		goto tx_fail;
2070280182Sjfv	}
2071280182Sjfv
2072320897Serj	/* Third, allocate the RX ring */
2073320897Serj	adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2074320897Serj	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2075320897Serj	if (adapter->rx_rings == NULL) {
2076280182Sjfv		device_printf(dev, "Unable to allocate RX ring memory\n");
2077280182Sjfv		error = ENOMEM;
2078280182Sjfv		goto rx_fail;
2079280182Sjfv	}
2080280182Sjfv
2081280182Sjfv	/* For the ring itself */
2082320897Serj	tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2083320897Serj	    DBA_ALIGN);
2084280182Sjfv
2085280182Sjfv	/*
2086280182Sjfv	 * Now set up the TX queues.  txconf is needed to handle the
2087280182Sjfv	 * possibility that things fail midcourse and we need to
2088280182Sjfv	 * unwind the allocations gracefully.
2089320897Serj	 */
2090280182Sjfv	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2091280182Sjfv		/* Set up some basics */
2092280182Sjfv		txr = &adapter->tx_rings[i];
2093280182Sjfv		txr->adapter = adapter;
2094320897Serj		txr->br = NULL;
2095320897Serj		/* In case SR-IOV is enabled, align the index properly */
2096320897Serj		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2097320897Serj		    i);
2098280182Sjfv		txr->num_desc = adapter->num_tx_desc;
2099280182Sjfv
2100280182Sjfv		/* Initialize the TX side lock */
2101280182Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2102280182Sjfv		    device_get_nameunit(dev), txr->me);
2103280182Sjfv		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2104280182Sjfv
2105320897Serj		if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2106320897Serj		    BUS_DMA_NOWAIT)) {
2107280182Sjfv			device_printf(dev,
2108280182Sjfv			    "Unable to allocate TX Descriptor memory\n");
2109280182Sjfv			error = ENOMEM;
2110280182Sjfv			goto err_tx_desc;
2111280182Sjfv		}
2112280182Sjfv		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2113280182Sjfv		bzero((void *)txr->tx_base, tsize);
2114280182Sjfv
2115320897Serj		/* Now allocate transmit buffers for the ring */
2116320897Serj		if (ixgbe_allocate_transmit_buffers(txr)) {
2117280182Sjfv			device_printf(dev,
2118280182Sjfv			    "Critical Failure setting up transmit buffers\n");
2119280182Sjfv			error = ENOMEM;
2120280182Sjfv			goto err_tx_desc;
2121320897Serj		}
2122320897Serj		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2123320897Serj			/* Allocate a buf ring */
2124320897Serj			txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2125320897Serj			    M_WAITOK, &txr->tx_mtx);
2126320897Serj			if (txr->br == NULL) {
2127320897Serj				device_printf(dev,
2128320897Serj				    "Critical Failure setting up buf ring\n");
2129320897Serj				error = ENOMEM;
2130320897Serj				goto err_tx_desc;
2131320897Serj			}
2132320897Serj		}
2133280182Sjfv	}
2134280182Sjfv
2135280182Sjfv	/*
2136280182Sjfv	 * Next the RX queues...
2137320897Serj	 */
2138320897Serj	rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2139320897Serj	    DBA_ALIGN);
2140280182Sjfv	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2141280182Sjfv		rxr = &adapter->rx_rings[i];
2142280182Sjfv		/* Set up some basics */
2143280182Sjfv		rxr->adapter = adapter;
2144320897Serj		/* In case SR-IOV is enabled, align the index properly */
2145320897Serj		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2146320897Serj		    i);
2147280182Sjfv		rxr->num_desc = adapter->num_rx_desc;
2148280182Sjfv
2149280182Sjfv		/* Initialize the RX side lock */
2150280182Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2151280182Sjfv		    device_get_nameunit(dev), rxr->me);
2152280182Sjfv		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2153280182Sjfv
2154320897Serj		if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2155320897Serj		    BUS_DMA_NOWAIT)) {
2156280182Sjfv			device_printf(dev,
2157280182Sjfv			    "Unable to allocate RxDescriptor memory\n");
2158280182Sjfv			error = ENOMEM;
2159280182Sjfv			goto err_rx_desc;
2160280182Sjfv		}
2161280182Sjfv		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2162280182Sjfv		bzero((void *)rxr->rx_base, rsize);
2163280182Sjfv
2164320897Serj		/* Allocate receive buffers for the ring */
2165280182Sjfv		if (ixgbe_allocate_receive_buffers(rxr)) {
2166280182Sjfv			device_printf(dev,
2167280182Sjfv			    "Critical Failure setting up receive buffers\n");
2168280182Sjfv			error = ENOMEM;
2169280182Sjfv			goto err_rx_desc;
2170280182Sjfv		}
2171280182Sjfv	}
2172280182Sjfv
2173280182Sjfv	/*
2174320897Serj	 * Finally set up the queue holding structs
2175320897Serj	 */
2176280182Sjfv	for (int i = 0; i < adapter->num_queues; i++) {
2177280182Sjfv		que = &adapter->queues[i];
2178280182Sjfv		que->adapter = adapter;
2179280182Sjfv		que->me = i;
2180280182Sjfv		que->txr = &adapter->tx_rings[i];
2181280182Sjfv		que->rxr = &adapter->rx_rings[i];
2182280182Sjfv	}
2183280182Sjfv
2184280182Sjfv	return (0);
2185280182Sjfv
2186280182Sjfverr_rx_desc:
2187280182Sjfv	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2188280182Sjfv		ixgbe_dma_free(adapter, &rxr->rxdma);
2189280182Sjfverr_tx_desc:
2190280182Sjfv	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2191280182Sjfv		ixgbe_dma_free(adapter, &txr->txdma);
2192280182Sjfv	free(adapter->rx_rings, M_DEVBUF);
2193280182Sjfvrx_fail:
2194280182Sjfv	free(adapter->tx_rings, M_DEVBUF);
2195280182Sjfvtx_fail:
2196280182Sjfv	free(adapter->queues, M_DEVBUF);
2197280182Sjfvfail:
2198280182Sjfv	return (error);
2199320897Serj} /* ixgbe_allocate_queues */
2200