/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixgbe/ix_txrx.c 315333 2017-03-15 21:20:17Z erj $*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixgbe.h"

extern int ix_crcstrip;

/*
 * HW RSC control:
 *  This feature only works with IPv4 and only on 82599 and later
 *  adapters. It also breaks IP forwarding, and unlike software LRO
 *  it cannot be disabled by the stack at run time. For these reasons
 *  it is left off by default and there is no tunable interface;
 *  enabling it requires changing this flag and recompiling.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; with the default of 20, every 20th packet
 * is probed.
 *
 * This feature can be disabled by setting this to 0.
 */
static int atr_sample_rate = 20;

/************************************************************************
 *  Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                                        struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                                     struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                                    struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
                                      struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);

MALLOC_DECLARE(M_IXGBE);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (ENETDOWN);
	if (!adapter->link_active)
		return (ENETDOWN);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
			break;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixgbe_xmit(txr, &m_head)) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack; this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter  *adapter = ifp->if_softc;
	struct ix_queue *que;
	struct tx_ring  *txr;
	int             i, err = 0;
	uint32_t        bucket_id;

	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is set up correctly, this should be the
	 * same bucket that the current CPU maps to.
	 */
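	/*
	 * Fallback order: RSS bucket for the flow hash, then the raw
	 * flowid modulo the queue count, then the current CPU.
	 */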
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
		    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0)) {
			i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
			if (bucket_id > adapter->num_queues)
				if_printf(ifp,
				    "bucket_id (%d) > num_queues (%d)\n",
				    bucket_id, adapter->num_queues);
#endif
		} else
			i = m->m_pkthdr.flowid % adapter->num_queues;
	} else
		i = curcpu % adapter->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & adapter->active_queues) == 0)
		i = ffsl(adapter->active_queues);

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXGBE_TX_TRYLOCK(txr)) {
		ixgbe_mq_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &txr->txq_task);

	return (0);
} /* ixgbe_mq_start */

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct mbuf    *next;
	int            enqueued = 0, err = 0;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (ENETDOWN);
	if (!txr->adapter->link_active)
		return (ENETDOWN);

	/* Process the queue */
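	/*
	 * On FreeBSD >= 901504 we peek at the head of the buf_ring,
	 * attempt the transmit, and then either advance past the mbuf
	 * (success) or put it back (failure). Older releases lack
	 * drbr_peek()/drbr_putback(), so there we dequeue up front and
	 * re-enqueue on failure.
	 */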
#if __FreeBSD_version < 901504
	next = drbr_dequeue(ifp, txr->br);
	while (next != NULL) {
		err = ixgbe_xmit(txr, &next);
		if (err != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
#else
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		err = ixgbe_xmit(txr, &next);
		if (err != 0) {
			if (next == NULL)
				drbr_advance(ifp, txr->br);
			else
				drbr_putback(ifp, txr->br, next);
#endif
			break;
		}
#if __FreeBSD_version >= 901504
		drbr_advance(ifp, txr->br);
#endif
		enqueued++;
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
#if __FreeBSD_version < 901504
		next = drbr_dequeue(ifp, txr->br);
#endif
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
		ixgbe_txeof(txr);

	return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a taskqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg, int pending)
{
	struct tx_ring *txr = arg;
	struct adapter *adapter = txr->adapter;
	struct ifnet   *ifp = adapter->ifp;

	IXGBE_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_qflush - Flush all ring buffers
 ************************************************************************/
void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;
	struct mbuf    *m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
} /* ixgbe_qflush */


/************************************************************************
 * ixgbe_xmit
 *
 *   This routine maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter          *adapter = txr->adapter;
	struct ixgbe_tx_buf     *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct mbuf             *m_head;
	int                     i, j, error, nsegs;
	int                     first;
	u32                     olinfo_status = 0, cmd_type_len;
	bool                    remap = TRUE;
	bus_dma_segment_t       segs[adapter->num_segs];
	bus_dmamap_t            map;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
retry:
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs,
	    &nsegs, BUS_DMA_NOWAIT);

	if (__predict_false(error)) {
		struct mbuf *m;

		switch (error) {
		case EFBIG:
			/* Try it again? - one try */
			if (remap == TRUE) {
				remap = FALSE;
				/*
				 * XXX: m_defrag will choke on
				 * non-MCLBYTES-sized clusters
				 */
				m = m_defrag(*m_headp, M_NOWAIT);
				if (m == NULL) {
					adapter->mbuf_defrag_failed++;
					m_freem(*m_headp);
					*m_headp = NULL;
					return (ENOBUFS);
				}
				*m_headp = m;
				goto retry;
			} else
				return (error);
		case ENOMEM:
			txr->no_tx_dma_setup++;
			return (error);
		default:
			txr->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	}

	/* Make certain there are enough descriptors */
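	/* nsegs data descriptors, one offload context descriptor, one spare */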
	if (txr->tx_avail < (nsegs + 2)) {
		txr->no_desc_avail++;
		bus_dmamap_unload(txr->txtag, map);
		return (ENOBUFS);
	}
	m_head = *m_headp;

	/*
	 * Set up the appropriate offload context;
	 * this will consume the first descriptor.
	 */
	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
	if (__predict_false(error)) {
		if (error == ENOBUFS)
			*m_headp = NULL;
		return (error);
	}

	/* Do the flow director magic */
	if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
	    (txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}

	olinfo_status |= IXGBE_ADVTXD_CC;
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		if (++i == txr->num_desc)
			i = 0;
	}

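	/*
	 * Set EOP on the last data descriptor and request a status
	 * writeback (RS) so ixgbe_txeof() can see the DD bit when the
	 * hardware has finished with this frame.
	 */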
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/*
	 * Here we swap the map so the last descriptor,
	 * which gets the completion interrupt, has the
	 * real map, and the first descriptor gets the
	 * unused map from this descriptor.
	 */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the EOP descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop = txd;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

	/* Mark queue as having work */
	if (txr->busy == 0)
		txr->busy = 1;

	return (0);
} /* ixgbe_xmit */


/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach; setup is done on every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	device_t            dev = adapter->dev;
	struct ixgbe_tx_buf *txbuf;
	int                 error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	error = bus_dma_tag_create(
	         /*      parent */ bus_get_dma_tag(adapter->dev),
	         /*   alignment */ 1,
	         /*      bounds */ 0,
	         /*     lowaddr */ BUS_SPACE_MAXADDR,
	         /*    highaddr */ BUS_SPACE_MAXADDR,
	         /*      filter */ NULL,
	         /*   filterarg */ NULL,
	         /*     maxsize */ IXGBE_TSO_SIZE,
	         /*   nsegments */ adapter->num_segs,
	         /*  maxsegsize */ PAGE_SIZE,
	         /*       flags */ 0,
	         /*    lockfunc */ NULL,
	         /* lockfuncarg */ NULL,
	                           &txr->txtag);
	if (error) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	txr->tx_buffers =
	    (struct ixgbe_tx_buf *)malloc(sizeof(struct ixgbe_tx_buf) *
	    adapter->num_tx_desc, M_IXGBE, M_NOWAIT | M_ZERO);
	if (!txr->tx_buffers) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixgbe_free_transmit_structures(adapter);

	return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter        *adapter = txr->adapter;
	struct ixgbe_tx_buf   *txbuf;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot    *slot;
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
		/*
		 * (under lock): if in netmap mode, do some consistency
		 * checks and set slot to entry 0 of the netmap ring.
		 */
		slot = netmap_reset(na, NR_TX, txr->me, 0);
	}
#endif /* DEV_NETMAP */

	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}

#ifdef DEV_NETMAP
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * Slots in the netmap ring (indexed by "si") are
		 * kring->nkr_hwofs positions "ahead" wrt the
		 * corresponding slot in the NIC ring. In some drivers
		 * (not here) nkr_hwofs can be negative. Function
		 * netmap_idx_n2k() handles wraparounds properly.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
			netmap_load_map(na, txr->txtag,
			    txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */

		/* Clear the EOP descriptor pointer */
		txbuf->eop = NULL;
	}

	/* Set the rate at which we sample packets */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		txr->atr_sample = atr_sample_rate;

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixgbe_setup_transmit_ring(txr);

	return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_UNLOCK(txr);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_IXGBE);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter      *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int                 i;

	INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag, tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag, tx_buffer->map);
			bus_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_IXGBE);
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_IXGBE);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET
	struct ip                        *ip;
#endif
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
	int                              ehdrlen, ip_hlen = 0;
	int                              offload = TRUE;
	int                              ctxd = txr->next_avail_desc;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u16                              vtag = 0;
	u16                              etype;
	u8                               ipproto = 0;
	caddr_t                          l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO))
		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!IXGBE_IS_X550VF(txr->adapter) && (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, caddr_t);
	else
		l3d = mtod(mp, caddr_t) + ehdrlen;

	switch (etype) {
#ifdef INET
		case ETHERTYPE_IP:
			ip = (struct ip *)(l3d);
			ip_hlen = ip->ip_hl << 2;
			ipproto = ip->ip_p;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
			/* Insert IPv4 checksum into data descriptors */
			if (mp->m_pkthdr.csum_flags & CSUM_IP) {
				ip->ip_sum = 0;
				*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			}
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(l3d);
			ip_hlen = sizeof(struct ip6_hdr);
			ipproto = ip6->ip6_nxt;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
			break;
#endif
		default:
			offload = FALSE;
			break;
	}

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
		case IPPROTO_TCP:
			if (mp->m_pkthdr.csum_flags &
			    (CSUM_IP_TCP | CSUM_IP6_TCP))
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			else
				offload = false;
			break;
		case IPPROTO_UDP:
			if (mp->m_pkthdr.csum_flags &
			    (CSUM_IP_UDP | CSUM_IP6_UDP))
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
			else
				offload = false;
			break;
		case IPPROTO_SCTP:
			if (mp->m_pkthdr.csum_flags &
			    (CSUM_IP_SCTP | CSUM_IP6_SCTP))
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			else
				offload = false;
			break;
		default:
			offload = false;
			break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header         *eh;
#ifdef INET6
	struct ip6_hdr                   *ip6;
#endif
#ifdef INET
	struct ip                        *ip;
#endif
	struct tcphdr                    *th;
	int                              ctxd, ehdrlen, ip_hlen, tcp_hlen;
	u32                              vlan_macip_lens = 0;
	u32                              type_tucmd_mlhl = 0;
	u32                              mss_l4len_idx = 0, paylen;
	u16                              vtag = 0, eh_type;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
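		/*
		 * Seed the TCP checksum with the pseudo-header sum; the
		 * hardware completes it for each segment it emits.
		 */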
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	ctxd = txr->next_avail_desc;
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->num_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx;

	return (0);
} /* ixgbe_tso_setup */


/************************************************************************
 * ixgbe_txeof
 *
 *   Examine each tx_buffer in the used queue. If the hardware is done
 *   processing the packet then free associated resources. The
 *   tx_buffer is put back on the free queue.
 ************************************************************************/
void
ixgbe_txeof(struct tx_ring *txr)
{
	struct adapter          *adapter = txr->adapter;
	struct ixgbe_tx_buf     *buf;
	union ixgbe_adv_tx_desc *txd;
	u32                     work, processed = 0;
	u32                     limit = adapter->tx_process_limit;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

#ifdef DEV_NETMAP
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->tx_rings[txr->me];
		txd = txr->tx_base;
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * In netmap mode, all the work is done in the context
		 * of the client thread. Interrupt handlers only wake up
		 * clients, which may be sleeping on individual rings
		 * or on a global resource for all rings.
		 * To implement tx interrupt mitigation, we wake up the client
		 * thread roughly every half ring, even if the NIC interrupts
		 * more frequently. This is implemented as follows:
		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
		 *   the slot that should wake up the thread (nkr_num_slots
		 *   means the user thread should not be woken up);
		 * - the driver ignores tx interrupts unless netmap_mitigate=0
		 *   or the slot has the DD bit set.
		 */
		if (!netmap_mitigate ||
		    (kring->nr_kflags < kring->nkr_num_slots &&
		     txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
			netmap_tx_irq(adapter->ifp, txr->me);
		}
		return;
	}
#endif /* DEV_NETMAP */

	if (txr->tx_avail == txr->num_desc) {
		txr->busy = 0;
		return;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	work -= txr->num_desc; /* The distance to ring end */
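	/*
	 * "work" is now a negative offset from the end of the ring:
	 * with num_desc = 1024 and next_to_clean = 1000 it starts at
	 * -24, so when ++work reaches zero below we know we have hit
	 * the end of the ring and must wrap buf/txd back to the start.
	 * Adding num_desc back at the end recovers the real index.
	 */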
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	do {
		union ixgbe_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
			break;	/* I/O not complete */

		if (buf->m_head) {
			txr->bytes += buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
		buf->eop = NULL;
		++txr->tx_avail;

		/* We clean the range if multi segment */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes += buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag, buf->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag, buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			++txr->tx_avail;
			buf->eop = NULL;
		}
		++txr->packets;
		++processed;

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	 * Queue hang detection: we know there is work outstanding or
	 * the first return above would have been taken, so increment
	 * busy if nothing managed to get cleaned; local_timer checks
	 * this count and marks the queue HUNG once it exceeds the
	 * maximum number of attempts.
	 */
	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
		++txr->busy;
	/*
	 * If anything got cleaned we reset the state to 1;
	 * note that this will turn off HUNG if it is set.
	 */
	if (processed)
		txr->busy = 1;

	if (txr->tx_avail == txr->num_desc)
		txr->busy = 0;

	return;
} /* ixgbe_txeof */

/************************************************************************
 * ixgbe_rsc_count
 *
 *   Used to detect a descriptor that has been merged by Hardware RSC.
 ************************************************************************/
static inline u32
ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
{
	return (le32toh(rx->wb.lower.lo_dword.data) &
	    IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
} /* ixgbe_rsc_count */

/************************************************************************
 * ixgbe_setup_hw_rsc
 *
 *   Initialize Hardware RSC (LRO) on 82599 for an RX ring;
 *   this is toggled by the LRO capability even though it is
 *   transparent to the stack.
 *
 *   NOTE: Since this HW feature only works with IPv4 and
 *         testing has shown soft LRO to be as effective,
 *         this feature will be disabled by default.
 ************************************************************************/
static void
ixgbe_setup_hw_rsc(struct rx_ring *rxr)
{
	struct adapter  *adapter = rxr->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             rscctrl, rdrxctl;

	/* If turning LRO/RSC off we need to disable it */
	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
		/* Write the cleared enable bit back so RSC is really off */
		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
		return;
	}

	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
#ifdef DEV_NETMAP
	/* Always strip CRC unless Netmap disabled it */
	if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
	    !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
	    ix_crcstrip)
#endif /* DEV_NETMAP */
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * Limit the total number of descriptors that
	 * can be combined, so it does not exceed 64K
	 */
	if (rxr->mbuf_sz == MCLBYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	else if (rxr->mbuf_sz == MJUMPAGESIZE)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
	else if (rxr->mbuf_sz == MJUM9BYTES)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	else  /* Using 16K cluster */
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;

	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);

	/* Enable TCP header recognition */
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));

	/* Disable RSC for ACK packets */
	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));

	rxr->hw_rsc = TRUE;
} /* ixgbe_setup_hw_rsc */

/************************************************************************
 * ixgbe_refresh_mbufs
 *
 *   Refresh mbuf buffers for RX descriptor rings.
 *    - now keeps its own state so discards due to resource
 *      exhaustion are unnecessary; if an mbuf cannot be obtained
 *      it just returns, keeping its placeholder, so it can simply
 *      be recalled to try again.
 ************************************************************************/
static void
ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter      *adapter = rxr->adapter;
	struct ixgbe_rx_buf *rxbuf;
	struct mbuf         *mp;
	bus_dma_segment_t   seg[1];
	int                 i, j, nsegs, error;
	bool                refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Control the loop with one beyond */
	if (++j == rxr->num_desc)
		j = 0;
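	/*
	 * i is the slot being refreshed, j runs one ahead and controls
	 * the loop; the loop ends when the look-ahead reaches 'limit'.
	 * next_to_refresh is advanced as slots are filled and is written
	 * to the hardware tail register on the way out.
	 */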

	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->buf == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
			    rxr->mbuf_sz);
			if (mp == NULL)
				goto update;
			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
		} else
			mp = rxbuf->buf;

		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;

		/* If we're dealing with an mbuf that was copied rather
		 * than replaced, there's no need to go through busdma.
		 */
		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
			/* Get the memory mapping */
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap,
			    mp, seg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
				m_free(mp);
				rxbuf->buf = NULL;
				goto update;
			}
			rxbuf->buf = mp;
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
			    htole64(seg[0].ds_addr);
		} else {
			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
			rxbuf->flags &= ~IXGBE_RX_COPY;
		}

		refreshed = TRUE;
		/* Next is precalculated */
		i = j;
		rxr->next_to_refresh = i;
		if (++j == rxr->num_desc)
			j = 0;
	}

update:
	if (refreshed) /* Update hardware tail index */
		IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);

	return;
} /* ixgbe_refresh_mbufs */
1269280182Sjfv
1270315333Serj/************************************************************************
1271315333Serj * ixgbe_allocate_receive_buffers
1272280182Sjfv *
1273315333Serj *   Allocate memory for rx_buffer structures. Since we use one
1274315333Serj *   rx_buffer per received packet, the maximum number of rx_buffer's
1275315333Serj *   that we'll need is equal to the number of receive descriptors
1276315333Serj *   that we've allocated.
1277315333Serj ************************************************************************/
1278315333Serjstatic int
1279280182Sjfvixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1280280182Sjfv{
1281315333Serj	struct adapter      *adapter = rxr->adapter;
1282315333Serj	device_t            dev = adapter->dev;
1283315333Serj	struct ixgbe_rx_buf *rxbuf;
1284315333Serj	int                 bsize, error;
1285280182Sjfv
1286280182Sjfv	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1287315333Serj	rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_IXGBE,
1288315333Serj	    M_NOWAIT | M_ZERO);
1289315333Serj	if (!rxr->rx_buffers) {
1290280182Sjfv		device_printf(dev, "Unable to allocate rx_buffer memory\n");
1291280182Sjfv		error = ENOMEM;
1292280182Sjfv		goto fail;
1293280182Sjfv	}
1294280182Sjfv
1295315333Serj	error = bus_dma_tag_create(
1296315333Serj	         /*      parent */ bus_get_dma_tag(dev),
1297315333Serj	         /*   alignment */ 1,
1298315333Serj	         /*      bounds */ 0,
1299315333Serj	         /*     lowaddr */ BUS_SPACE_MAXADDR,
1300315333Serj	         /*    highaddr */ BUS_SPACE_MAXADDR,
1301315333Serj	         /*      filter */ NULL,
1302315333Serj	         /*   filterarg */ NULL,
1303315333Serj	         /*     maxsize */ MJUM16BYTES,
1304315333Serj	         /*   nsegments */ 1,
1305315333Serj	         /*  maxsegsize */ MJUM16BYTES,
1306315333Serj	         /*       flags */ 0,
1307315333Serj	         /*    lockfunc */ NULL,
1308315333Serj	         /* lockfuncarg */ NULL,
1309315333Serj	                           &rxr->ptag);
1310315333Serj	if (error) {
1311280182Sjfv		device_printf(dev, "Unable to create RX DMA tag\n");
1312280182Sjfv		goto fail;
1313280182Sjfv	}
1314280182Sjfv
1315295008Ssbruno	for (int i = 0; i < rxr->num_desc; i++) {
1316280182Sjfv		rxbuf = &rxr->rx_buffers[i];
1317283620Serj		error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1318280182Sjfv		if (error) {
1319280182Sjfv			device_printf(dev, "Unable to create RX dma map\n");
1320280182Sjfv			goto fail;
1321280182Sjfv		}
1322280182Sjfv	}
1323280182Sjfv
1324280182Sjfv	return (0);
1325280182Sjfv
1326280182Sjfvfail:
1327280182Sjfv	/* Frees all, but can handle partial completion */
1328280182Sjfv	ixgbe_free_receive_structures(adapter);
1329315333Serj
1330280182Sjfv	return (error);
1331315333Serj} /* ixgbe_allocate_receive_buffers */
1332280182Sjfv
1333315333Serj/************************************************************************
1334315333Serj * ixgbe_free_receive_ring
1335315333Serj ************************************************************************/
1336315333Serjstatic void
1337280182Sjfvixgbe_free_receive_ring(struct rx_ring *rxr)
1338315333Serj{
1339315333Serj	struct ixgbe_rx_buf *rxbuf;
1340280182Sjfv
1341295008Ssbruno	for (int i = 0; i < rxr->num_desc; i++) {
1342280182Sjfv		rxbuf = &rxr->rx_buffers[i];
1343280182Sjfv		if (rxbuf->buf != NULL) {
1344280182Sjfv			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
1345280182Sjfv			    BUS_DMASYNC_POSTREAD);
1346280182Sjfv			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
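			/* Ensure m_freem() sees a packet header mbuf */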
1347280182Sjfv			rxbuf->buf->m_flags |= M_PKTHDR;
1348280182Sjfv			m_freem(rxbuf->buf);
1349280182Sjfv			rxbuf->buf = NULL;
1350280182Sjfv			rxbuf->flags = 0;
1351280182Sjfv		}
1352280182Sjfv	}
1353315333Serj} /* ixgbe_free_receive_ring */
1354280182Sjfv
1355315333Serj/************************************************************************
1356315333Serj * ixgbe_setup_receive_ring
1357280182Sjfv *
1358315333Serj *   Initialize a receive ring and its buffers.
1359315333Serj ************************************************************************/
1360280182Sjfvstatic int
1361280182Sjfvixgbe_setup_receive_ring(struct rx_ring *rxr)
1362280182Sjfv{
1363315333Serj	struct adapter        *adapter;
1364315333Serj	struct ifnet          *ifp;
1365315333Serj	device_t              dev;
1366315333Serj	struct ixgbe_rx_buf   *rxbuf;
1367315333Serj	struct lro_ctrl       *lro = &rxr->lro;
1368280182Sjfv#ifdef DEV_NETMAP
1369280182Sjfv	struct netmap_adapter *na = NA(rxr->adapter->ifp);
1370315333Serj	struct netmap_slot    *slot;
1371280182Sjfv#endif /* DEV_NETMAP */
1372315333Serj	bus_dma_segment_t     seg[1];
1373315333Serj	int                   rsize, nsegs, error = 0;
1374280182Sjfv
1375280182Sjfv	adapter = rxr->adapter;
1376280182Sjfv	ifp = adapter->ifp;
1377280182Sjfv	dev = adapter->dev;
1378280182Sjfv
1379280182Sjfv	/* Clear the ring contents */
1380280182Sjfv	IXGBE_RX_LOCK(rxr);
1381315333Serj
1382280182Sjfv#ifdef DEV_NETMAP
1383315333Serj	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1384315333Serj		slot = netmap_reset(na, NR_RX, rxr->me, 0);
1385280182Sjfv#endif /* DEV_NETMAP */
1386315333Serj
1387280182Sjfv	rsize = roundup2(adapter->num_rx_desc *
1388280182Sjfv	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1389280182Sjfv	bzero((void *)rxr->rx_base, rsize);
1390280182Sjfv	/* Cache the size */
1391280182Sjfv	rxr->mbuf_sz = adapter->rx_mbuf_sz;
1392280182Sjfv
1393280182Sjfv	/* Free current RX buffer structs and their mbufs */
1394280182Sjfv	ixgbe_free_receive_ring(rxr);
1395280182Sjfv
1396280182Sjfv	/* Now replenish the mbufs */
1397280182Sjfv	for (int j = 0; j != rxr->num_desc; ++j) {
1398315333Serj		struct mbuf *mp;
1399280182Sjfv
1400280182Sjfv		rxbuf = &rxr->rx_buffers[j];
1401315333Serj
1402280182Sjfv#ifdef DEV_NETMAP
1403280182Sjfv		/*
1404280182Sjfv		 * In netmap mode, fill the map and set the buffer
1405280182Sjfv		 * address in the NIC ring, considering the offset
1406280182Sjfv		 * between the netmap and NIC rings (see comment in
1407280182Sjfv		 * ixgbe_setup_transmit_ring() ). No need to allocate
1408280182Sjfv		 * an mbuf, so end the block with a continue;
1409280182Sjfv		 */
1410315333Serj		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1411280182Sjfv			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1412280182Sjfv			uint64_t paddr;
1413280182Sjfv			void *addr;
1414280182Sjfv
1415280182Sjfv			addr = PNMB(na, slot + sj, &paddr);
1416280182Sjfv			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1417280182Sjfv			/* Update descriptor and the cached value */
1418280182Sjfv			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1419280182Sjfv			rxbuf->addr = htole64(paddr);
1420280182Sjfv			continue;
1421280182Sjfv		}
1422280182Sjfv#endif /* DEV_NETMAP */
1423315333Serj
1424315333Serj		rxbuf->flags = 0;
1425315333Serj		rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1426315333Serj		    adapter->rx_mbuf_sz);
1427280182Sjfv		if (rxbuf->buf == NULL) {
1428280182Sjfv			error = ENOBUFS;
1429315333Serj			goto fail;
1430280182Sjfv		}
1431280182Sjfv		mp = rxbuf->buf;
1432280182Sjfv		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1433280182Sjfv		/* Get the memory mapping */
1434315333Serj		error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, seg,
1435280182Sjfv		    &nsegs, BUS_DMA_NOWAIT);
1436280182Sjfv		if (error != 0)
1437315333Serj			goto fail;
1438315333Serj		bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD);
1439280182Sjfv		/* Update the descriptor and the cached value */
1440280182Sjfv		rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
1441280182Sjfv		rxbuf->addr = htole64(seg[0].ds_addr);
1442280182Sjfv	}
1443280182Sjfv
1444280182Sjfv
1445280182Sjfv	/* Setup our descriptor indices */
1446280182Sjfv	rxr->next_to_check = 0;
1447280182Sjfv	rxr->next_to_refresh = 0;
1448280182Sjfv	rxr->lro_enabled = FALSE;
1449280182Sjfv	rxr->rx_copies = 0;
1450280182Sjfv	rxr->rx_bytes = 0;
1451280182Sjfv	rxr->vtag_strip = FALSE;
1452280182Sjfv
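	/* Make the freshly written descriptors visible to the hardware */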
1453280182Sjfv	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1454280182Sjfv	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1455280182Sjfv
1456280182Sjfv	/*
1457315333Serj	 * Now set up the LRO interface
1458315333Serj	 */
1459280182Sjfv	if (ixgbe_rsc_enable)
1460280182Sjfv		ixgbe_setup_hw_rsc(rxr);
1461280182Sjfv	else if (ifp->if_capenable & IFCAP_LRO) {
1462280182Sjfv		int err = tcp_lro_init(lro);
1463280182Sjfv		if (err) {
1464280182Sjfv			device_printf(dev, "LRO Initialization failed!\n");
1465280182Sjfv			goto fail;
1466280182Sjfv		}
1467280182Sjfv		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1468280182Sjfv		rxr->lro_enabled = TRUE;
1469280182Sjfv		lro->ifp = adapter->ifp;
1470280182Sjfv	}
1471280182Sjfv
1472280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1473315333Serj
1474280182Sjfv	return (0);
1475280182Sjfv
1476280182Sjfvfail:
1477280182Sjfv	ixgbe_free_receive_ring(rxr);
1478280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1479315333Serj
1480280182Sjfv	return (error);
1481315333Serj} /* ixgbe_setup_receive_ring */
1482280182Sjfv
1483315333Serj/************************************************************************
1484315333Serj * ixgbe_setup_receive_structures - Initialize all receive rings.
1485315333Serj ************************************************************************/
1486280182Sjfvint
1487280182Sjfvixgbe_setup_receive_structures(struct adapter *adapter)
1488280182Sjfv{
1489280182Sjfv	struct rx_ring *rxr = adapter->rx_rings;
1490315333Serj	int            j;
1491280182Sjfv
1492280182Sjfv	for (j = 0; j < adapter->num_queues; j++, rxr++)
1493280182Sjfv		if (ixgbe_setup_receive_ring(rxr))
1494280182Sjfv			goto fail;
1495280182Sjfv
1496280182Sjfv	return (0);
1497280182Sjfvfail:
1498280182Sjfv	/*
1499280182Sjfv	 * Free RX buffers allocated so far; we will only handle
1500280182Sjfv	 * the rings that completed, since the failing case will have
1501280182Sjfv	 * cleaned up after itself. 'j' failed, so it is the terminus.
1502280182Sjfv	 */
1503280182Sjfv	for (int i = 0; i < j; ++i) {
1504280182Sjfv		rxr = &adapter->rx_rings[i];
1505280182Sjfv		ixgbe_free_receive_ring(rxr);
1506280182Sjfv	}
1507280182Sjfv
1508280182Sjfv	return (ENOBUFS);
1509315333Serj} /* ixgbe_setup_receive_structures */
1510280182Sjfv
1511280182Sjfv
1512315333Serj/************************************************************************
1513315333Serj * ixgbe_free_receive_structures - Free all receive rings.
1514315333Serj ************************************************************************/
1515280182Sjfvvoid
1516280182Sjfvixgbe_free_receive_structures(struct adapter *adapter)
1517280182Sjfv{
1518280182Sjfv	struct rx_ring *rxr = adapter->rx_rings;
1519315333Serj	struct lro_ctrl *lro;
1520280182Sjfv
1521280182Sjfv	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1522280182Sjfv
1523280182Sjfv	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1524315333Serj		lro = &rxr->lro;
1525280182Sjfv		ixgbe_free_receive_buffers(rxr);
1526280182Sjfv		/* Free LRO memory */
1527280182Sjfv		tcp_lro_free(lro);
1528280182Sjfv		/* Free the ring memory as well */
1529280182Sjfv		ixgbe_dma_free(adapter, &rxr->rxdma);
1530280182Sjfv	}
1531280182Sjfv
1532315333Serj	free(adapter->rx_rings, M_IXGBE);
1533315333Serj} /* ixgbe_free_receive_structures */
1534280182Sjfv
1535280182Sjfv
1536315333Serj/************************************************************************
1537315333Serj * ixgbe_free_receive_buffers - Free receive ring data structures
1538315333Serj ************************************************************************/
1539280182Sjfvvoid
1540280182Sjfvixgbe_free_receive_buffers(struct rx_ring *rxr)
1541280182Sjfv{
1542315333Serj	struct adapter      *adapter = rxr->adapter;
1543315333Serj	struct ixgbe_rx_buf *rxbuf;
1544280182Sjfv
1545280182Sjfv	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1546280182Sjfv
1547280182Sjfv	/* Cleanup any existing buffers */
1548280182Sjfv	if (rxr->rx_buffers != NULL) {
1549280182Sjfv		for (int i = 0; i < adapter->num_rx_desc; i++) {
1550280182Sjfv			rxbuf = &rxr->rx_buffers[i];
1551280182Sjfv			if (rxbuf->buf != NULL) {
1552280182Sjfv				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
1553280182Sjfv				    BUS_DMASYNC_POSTREAD);
1554280182Sjfv				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
1555280182Sjfv				rxbuf->buf->m_flags |= M_PKTHDR;
1556280182Sjfv				m_freem(rxbuf->buf);
1557280182Sjfv			}
1558280182Sjfv			rxbuf->buf = NULL;
1559280182Sjfv			if (rxbuf->pmap != NULL) {
1560280182Sjfv				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1561280182Sjfv				rxbuf->pmap = NULL;
1562280182Sjfv			}
1563280182Sjfv		}
1564280182Sjfv		if (rxr->rx_buffers != NULL) {
1565315333Serj			free(rxr->rx_buffers, M_IXGBE);
1566280182Sjfv			rxr->rx_buffers = NULL;
1567280182Sjfv		}
1568280182Sjfv	}
1569280182Sjfv
1570280182Sjfv	if (rxr->ptag != NULL) {
1571280182Sjfv		bus_dma_tag_destroy(rxr->ptag);
1572280182Sjfv		rxr->ptag = NULL;
1573280182Sjfv	}
1574280182Sjfv
1575280182Sjfv	return;
1576315333Serj} /* ixgbe_free_receive_buffers */
1577280182Sjfv
1578315333Serj/************************************************************************
1579315333Serj * ixgbe_rx_input
1580315333Serj ************************************************************************/
1581280182Sjfvstatic __inline void
1582315333Serjixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1583315333Serj    u32 ptype)
1584280182Sjfv{
1585315333Serj	/*
1586315333Serj	 * At the moment, LRO is only for IP/TCP packets whose TCP checksum
1587315333Serj	 * has been validated by hardware and which carry no VLAN tag in the
1588315333Serj	 * Ethernet header.  For IPv6 we do not yet support extension headers.
1589315333Serj	 */
1590315333Serj	if (rxr->lro_enabled &&
1591315333Serj	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1592315333Serj	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1593315333Serj	    ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1594315333Serj	     (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1595315333Serj	     (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1596315333Serj	     (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1597315333Serj	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1598315333Serj	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1599315333Serj		/*
1600315333Serj		 * Try an LRO enqueue first; the packet is handed to
1601315333Serj		 * the stack below only if:
1602315333Serj		 *  - there are no LRO resources, or
1603315333Serj		 *  - the LRO enqueue fails
1604315333Serj		 */
1605315333Serj		if (rxr->lro.lro_cnt != 0)
1606315333Serj			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1607315333Serj				return;
1608315333Serj	}
1609280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1610315333Serj	(*ifp->if_input)(ifp, m);
1611280182Sjfv	IXGBE_RX_LOCK(rxr);
1612315333Serj} /* ixgbe_rx_input */
1613280182Sjfv
1614315333Serj/************************************************************************
1615315333Serj * ixgbe_rx_discard
1616315333Serj ************************************************************************/
1617280182Sjfvstatic __inline void
1618280182Sjfvixgbe_rx_discard(struct rx_ring *rxr, int i)
1619280182Sjfv{
1620315333Serj	struct ixgbe_rx_buf *rbuf;
1621280182Sjfv
1622280182Sjfv	rbuf = &rxr->rx_buffers[i];
1623280182Sjfv
1624280182Sjfv	/*
1625315333Serj	 * With advanced descriptors the writeback
1626315333Serj	 * clobbers the buffer addrs, so it's easier
1627315333Serj	 * to just free the existing mbufs and take
1628315333Serj	 * the normal refresh path to get new buffers
1629315333Serj	 * and mapping.
1630315333Serj	 */
1631280182Sjfv
1632280182Sjfv	if (rbuf->fmp != NULL) { /* Partial chain? */
1633280182Sjfv		rbuf->fmp->m_flags |= M_PKTHDR;
1634280182Sjfv		m_freem(rbuf->fmp);
1635280182Sjfv		rbuf->fmp = NULL;
1636280182Sjfv		rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1637280182Sjfv	} else if (rbuf->buf) {
1638280182Sjfv		m_free(rbuf->buf);
1639280182Sjfv		rbuf->buf = NULL;
1640280182Sjfv	}
1641283620Serj	bus_dmamap_unload(rxr->ptag, rbuf->pmap);
1642280182Sjfv
1643280182Sjfv	rbuf->flags = 0;
1644315333Serj
1645280182Sjfv	return;
1646315333Serj} /* ixgbe_rx_discard */
1647280182Sjfv
1648280182Sjfv
1649315333Serj/************************************************************************
1650315333Serj * ixgbe_rxeof
1651280182Sjfv *
1652315333Serj *   This routine executes in interrupt context. It replenishes
1653315333Serj *   the mbufs in the descriptor ring and passes data that has been
1654280182Sjfv *   DMA'ed into host memory up to the upper layer.
1655280182Sjfv *
1656315333Serj *   Return TRUE for more work, FALSE for all clean.
1657315333Serj ************************************************************************/
1658280182Sjfvbool
1659280182Sjfvixgbe_rxeof(struct ix_queue *que)
1660280182Sjfv{
1661315333Serj	struct adapter          *adapter = que->adapter;
1662315333Serj	struct rx_ring          *rxr = que->rxr;
1663315333Serj	struct ifnet            *ifp = adapter->ifp;
1664315333Serj	struct lro_ctrl         *lro = &rxr->lro;
1665315333Serj#if __FreeBSD_version < 1100105
1666315333Serj	struct lro_entry        *queued;
1667315333Serj#endif
1668315333Serj	union ixgbe_adv_rx_desc *cur;
1669315333Serj	struct ixgbe_rx_buf     *rbuf, *nbuf;
1670315333Serj	int                     i, nextp, processed = 0;
1671315333Serj	u32                     staterr = 0;
1672315333Serj	u32                     count = adapter->rx_process_limit;
1673315333Serj	u16                     pkt_info;
1674280182Sjfv
1675280182Sjfv	IXGBE_RX_LOCK(rxr);
1676280182Sjfv
1677280182Sjfv#ifdef DEV_NETMAP
1678315333Serj	if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1679315333Serj		/* Same as the txeof routine: wake up clients on interrupt. */
1680315333Serj		if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1681315333Serj			IXGBE_RX_UNLOCK(rxr);
1682315333Serj			return (FALSE);
1683315333Serj		}
1684280182Sjfv	}
1685280182Sjfv#endif /* DEV_NETMAP */
1686280182Sjfv
1687280182Sjfv	for (i = rxr->next_to_check; count != 0;) {
1688315333Serj		struct mbuf *sendmp, *mp;
1689315333Serj		u32         rsc, ptype;
1690315333Serj		u16         len;
1691315333Serj		u16         vtag = 0;
1692315333Serj		bool        eop;
1693315333Serj
1694280182Sjfv		/* Sync the ring. */
1695280182Sjfv		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1696280182Sjfv		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1697280182Sjfv
1698280182Sjfv		cur = &rxr->rx_base[i];
1699280182Sjfv		staterr = le32toh(cur->wb.upper.status_error);
1700280182Sjfv		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1701280182Sjfv
1702280182Sjfv		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1703280182Sjfv			break;
1704280182Sjfv		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1705280182Sjfv			break;
1706280182Sjfv
1707280182Sjfv		count--;
1708280182Sjfv		sendmp = NULL;
1709280182Sjfv		nbuf = NULL;
1710280182Sjfv		rsc = 0;
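		/*
		 * Clear the descriptor status so a stale DD bit is not
		 * seen if this slot is examined again before the
		 * hardware rewrites it.
		 */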
1711280182Sjfv		cur->wb.upper.status_error = 0;
1712280182Sjfv		rbuf = &rxr->rx_buffers[i];
1713280182Sjfv		mp = rbuf->buf;
1714280182Sjfv
1715280182Sjfv		len = le16toh(cur->wb.upper.length);
1716280182Sjfv		ptype = le32toh(cur->wb.lower.lo_dword.data) &
1717280182Sjfv		    IXGBE_RXDADV_PKTTYPE_MASK;
1718280182Sjfv		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1719280182Sjfv
1720280182Sjfv		/* Make sure bad packets are discarded */
1721280182Sjfv		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1722280182Sjfv			rxr->rx_discarded++;
1723280182Sjfv			ixgbe_rx_discard(rxr, i);
1724280182Sjfv			goto next_desc;
1725280182Sjfv		}
1726280182Sjfv
1727280182Sjfv		/*
1728315333Serj		 * On 82599, which supports a hardware
1729315333Serj		 * LRO (called HW RSC), packets need
1730315333Serj		 * not be fragmented across sequential
1731315333Serj		 * descriptors; rather, the next descriptor
1732315333Serj		 * is indicated in bits of the descriptor.
1733315333Serj		 * This also means that we might process
1734315333Serj		 * more than one packet at a time, something
1735315333Serj		 * that has never been true before; this
1736315333Serj		 * required eliminating global chain pointers
1737315333Serj		 * in favor of what we are doing here.  -jfv
1738315333Serj		 */
1739280182Sjfv		if (!eop) {
1740280182Sjfv			/*
1741315333Serj			 * Figure out the next descriptor
1742315333Serj			 * of this frame.
1743315333Serj			 */
1744280182Sjfv			if (rxr->hw_rsc == TRUE) {
1745280182Sjfv				rsc = ixgbe_rsc_count(cur);
1746280182Sjfv				rxr->rsc_num += (rsc - 1);
1747280182Sjfv			}
1748280182Sjfv			if (rsc) { /* Get hardware index */
1749315333Serj				nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1750280182Sjfv				    IXGBE_RXDADV_NEXTP_SHIFT);
1751280182Sjfv			} else { /* Just sequential */
1752280182Sjfv				nextp = i + 1;
1753280182Sjfv				if (nextp == adapter->num_rx_desc)
1754280182Sjfv					nextp = 0;
1755280182Sjfv			}
1756280182Sjfv			nbuf = &rxr->rx_buffers[nextp];
1757280182Sjfv			prefetch(nbuf);
1758280182Sjfv		}
1759280182Sjfv		/*
1760315333Serj		 * Rather than using the fmp/lmp global pointers
1761315333Serj		 * we now keep the head of a packet chain in the
1762315333Serj		 * buffer struct and pass this along from one
1763315333Serj		 * descriptor to the next, until we get EOP.
1764315333Serj		 */
1765280182Sjfv		mp->m_len = len;
1766280182Sjfv		/*
1767315333Serj		 * See if there is a stored head that determines
1768315333Serj		 * whether this buffer continues an existing frame
1769315333Serj		 */
1770280182Sjfv		sendmp = rbuf->fmp;
1771280182Sjfv		if (sendmp != NULL) {  /* secondary frag */
1772280182Sjfv			rbuf->buf = rbuf->fmp = NULL;
1773280182Sjfv			mp->m_flags &= ~M_PKTHDR;
1774280182Sjfv			sendmp->m_pkthdr.len += mp->m_len;
1775280182Sjfv		} else {
1776280182Sjfv			/*
1777280182Sjfv			 * Optimize.  This might be a small packet,
1778280182Sjfv			 * maybe just a TCP ACK.  Do a fast copy that
1779280182Sjfv			 * is cache aligned into a new mbuf, and
1780280182Sjfv			 * leave the old mbuf+cluster for re-use.
1781280182Sjfv			 */
1782280182Sjfv			if (eop && len <= IXGBE_RX_COPY_LEN) {
1783280182Sjfv				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1784280182Sjfv				if (sendmp != NULL) {
1785315333Serj					sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1786315333Serj					ixgbe_bcopy(mp->m_data, sendmp->m_data,
1787315333Serj					    len);
1788280182Sjfv					sendmp->m_len = len;
1789280182Sjfv					rxr->rx_copies++;
1790280182Sjfv					rbuf->flags |= IXGBE_RX_COPY;
1791280182Sjfv				}
1792280182Sjfv			}
1793280182Sjfv			if (sendmp == NULL) {
1794280182Sjfv				rbuf->buf = rbuf->fmp = NULL;
1795280182Sjfv				sendmp = mp;
1796280182Sjfv			}
1797280182Sjfv
1798280182Sjfv			/* first desc of a non-ps chain */
1799280182Sjfv			sendmp->m_flags |= M_PKTHDR;
1800280182Sjfv			sendmp->m_pkthdr.len = mp->m_len;
1801280182Sjfv		}
1802280182Sjfv		++processed;
1803280182Sjfv
1804280182Sjfv		/* Pass the head pointer on */
1805280182Sjfv		if (eop == 0) {
1806280182Sjfv			nbuf->fmp = sendmp;
1807280182Sjfv			sendmp = NULL;
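			/*
			 * Pre-link this fragment to the next descriptor's
			 * mbuf; the head of the chain travels forward in
			 * nbuf->fmp.
			 */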
1808280182Sjfv			mp->m_next = nbuf->buf;
1809280182Sjfv		} else { /* Sending this frame */
1810280182Sjfv			sendmp->m_pkthdr.rcvif = ifp;
1811280182Sjfv			rxr->rx_packets++;
1812280182Sjfv			/* capture data for AIM */
1813280182Sjfv			rxr->bytes += sendmp->m_pkthdr.len;
1814280182Sjfv			rxr->rx_bytes += sendmp->m_pkthdr.len;
1815280182Sjfv			/* Process vlan info */
1816315333Serj			if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1817280182Sjfv				vtag = le16toh(cur->wb.upper.vlan);
1818280182Sjfv			if (vtag) {
1819280182Sjfv				sendmp->m_pkthdr.ether_vtag = vtag;
1820280182Sjfv				sendmp->m_flags |= M_VLANTAG;
1821280182Sjfv			}
1822280182Sjfv			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1823280182Sjfv				ixgbe_rx_checksum(staterr, sendmp, ptype);
1824285764Shiren
1825315333Serj			/*
1826315333Serj			 * With multiple queues, the RXCSUM.PCSD bit is set
1827315333Serj			 * and never cleared, which means an RSS hash is
1828315333Serj			 * available for us to use.
1829315333Serj			 */
1830315333Serj			if (adapter->num_queues > 1) {
1831315333Serj				sendmp->m_pkthdr.flowid =
1832315333Serj				    le32toh(cur->wb.lower.hi_dword.rss);
1833315333Serj				switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1834315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV4:
1835315333Serj					M_HASHTYPE_SET(sendmp,
1836315333Serj					    M_HASHTYPE_RSS_IPV4);
1837315333Serj					break;
1838315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1839315333Serj					M_HASHTYPE_SET(sendmp,
1840315333Serj					    M_HASHTYPE_RSS_TCP_IPV4);
1841315333Serj					break;
1842315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV6:
1843315333Serj					M_HASHTYPE_SET(sendmp,
1844315333Serj					    M_HASHTYPE_RSS_IPV6);
1845315333Serj					break;
1846315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1847315333Serj					M_HASHTYPE_SET(sendmp,
1848315333Serj					    M_HASHTYPE_RSS_TCP_IPV6);
1849315333Serj					break;
1850315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1851315333Serj					M_HASHTYPE_SET(sendmp,
1852315333Serj					    M_HASHTYPE_RSS_IPV6_EX);
1853315333Serj					break;
1854315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1855315333Serj					M_HASHTYPE_SET(sendmp,
1856315333Serj					    M_HASHTYPE_RSS_TCP_IPV6_EX);
1857315333Serj					break;
1858315333Serj#if __FreeBSD_version > 1100000
1859315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
1860315333Serj					M_HASHTYPE_SET(sendmp,
1861315333Serj					    M_HASHTYPE_RSS_UDP_IPV4);
1862315333Serj					break;
1863315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
1864315333Serj					M_HASHTYPE_SET(sendmp,
1865315333Serj					    M_HASHTYPE_RSS_UDP_IPV6);
1866315333Serj					break;
1867315333Serj				case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
1868315333Serj					M_HASHTYPE_SET(sendmp,
1869315333Serj					    M_HASHTYPE_RSS_UDP_IPV6_EX);
1870315333Serj					break;
1871315333Serj#endif
1872315333Serj				default:
1873315333Serj#if __FreeBSD_version < 1100116
1874315333Serj					M_HASHTYPE_SET(sendmp,
1875315333Serj					    M_HASHTYPE_OPAQUE);
1876315333Serj#else
1877315333Serj					M_HASHTYPE_SET(sendmp,
1878315333Serj					    M_HASHTYPE_OPAQUE_HASH);
1879315333Serj#endif
1880315333Serj				}
1881315333Serj			} else {
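				/*
				 * No RSS hash available; use the queue's
				 * MSI-X vector as an opaque flowid so the
				 * stack still gets a stable per-queue id.
				 */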
1882315333Serj				sendmp->m_pkthdr.flowid = que->msix;
1883285764Shiren				M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
1884285764Shiren			}
1885280182Sjfv		}
1886280182Sjfvnext_desc:
1887280182Sjfv		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1888280182Sjfv		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1889280182Sjfv
1890280182Sjfv		/* Advance our pointers to the next descriptor. */
1891280182Sjfv		if (++i == rxr->num_desc)
1892280182Sjfv			i = 0;
1893280182Sjfv
1894280182Sjfv		/* Now send to the stack or do LRO */
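		/*
		 * ixgbe_rx_input() drops the RX lock around if_input(),
		 * so publish next_to_check before the call and re-read
		 * it afterwards.
		 */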
1895280182Sjfv		if (sendmp != NULL) {
1896280182Sjfv			rxr->next_to_check = i;
1897280182Sjfv			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
1898280182Sjfv			i = rxr->next_to_check;
1899280182Sjfv		}
1900280182Sjfv
1901315333Serj		/* Every 8 processed descriptors, refresh the mbufs */
1902280182Sjfv		if (processed == 8) {
1903280182Sjfv			ixgbe_refresh_mbufs(rxr, i);
1904280182Sjfv			processed = 0;
1905280182Sjfv		}
1906280182Sjfv	}
1907280182Sjfv
1908280182Sjfv	/* Refresh any remaining buf structs */
1909280182Sjfv	if (ixgbe_rx_unrefreshed(rxr))
1910280182Sjfv		ixgbe_refresh_mbufs(rxr, i);
1911280182Sjfv
1912280182Sjfv	rxr->next_to_check = i;
1913280182Sjfv
1914280182Sjfv	/*
1915280182Sjfv	 * Flush any outstanding LRO work
1916280182Sjfv	 */
1917315333Serj#if __FreeBSD_version < 1100105
1918280182Sjfv	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1919280182Sjfv		SLIST_REMOVE_HEAD(&lro->lro_active, next);
1920280182Sjfv		tcp_lro_flush(lro, queued);
1921280182Sjfv	}
1922315333Serj#else
1923315333Serj	tcp_lro_flush_all(lro);
1924315333Serj#endif
1925280182Sjfv
1926280182Sjfv	IXGBE_RX_UNLOCK(rxr);
1927280182Sjfv
1928280182Sjfv	/*
1929315333Serj	 * Still have cleaning to do?
1930315333Serj	 */
1931280182Sjfv	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
1932280182Sjfv		return (TRUE);
1933280182Sjfv
1934315333Serj	return (FALSE);
1935315333Serj} /* ixgbe_rxeof */
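/*
 * Illustrative only: the MSI-X queue handler (in the core driver, not in
 * this file) typically uses the return value to decide whether to
 * reschedule itself, along the lines of:
 *
 *	more = ixgbe_rxeof(que);
 *	if (more)
 *		taskqueue_enqueue(que->tq, &que->que_task);
 *
 * The 'tq' and 'que_task' field names are assumed here for illustration.
 */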
1936280182Sjfv
1937315333Serj
1938315333Serj/************************************************************************
1939315333Serj * ixgbe_rx_checksum
1940280182Sjfv *
1941315333Serj *   Verify that the hardware indicated that the checksum is valid.
1942315333Serj *   Inform the stack about the status of the checksum so that the
1943315333Serj *   stack doesn't spend time verifying it again.
1944315333Serj ************************************************************************/
1945280182Sjfvstatic void
1946280182Sjfvixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
1947280182Sjfv{
1948315333Serj	u16  status = (u16)staterr;
1949315333Serj	u8   errors = (u8)(staterr >> 24);
1950315333Serj	bool sctp = false;
1951280182Sjfv
1952280182Sjfv	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1953280182Sjfv	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
1954295524Ssbruno		sctp = true;
1955280182Sjfv
1956295524Ssbruno	/* IPv4 checksum */
1957280182Sjfv	if (status & IXGBE_RXD_STAT_IPCS) {
1958295524Ssbruno		mp->m_pkthdr.csum_flags |= CSUM_L3_CALC;
1959295524Ssbruno		/* IP Checksum Good */
1960295524Ssbruno		if (!(errors & IXGBE_RXD_ERR_IPE))
1961295524Ssbruno			mp->m_pkthdr.csum_flags |= CSUM_L3_VALID;
1962280182Sjfv	}
1963295524Ssbruno	/* TCP/UDP/SCTP checksum */
1964280182Sjfv	if (status & IXGBE_RXD_STAT_L4CS) {
1965295524Ssbruno		mp->m_pkthdr.csum_flags |= CSUM_L4_CALC;
1966280182Sjfv		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
1967295524Ssbruno			mp->m_pkthdr.csum_flags |= CSUM_L4_VALID;
1968280182Sjfv			if (!sctp)
1969280182Sjfv				mp->m_pkthdr.csum_data = htons(0xffff);
1970295524Ssbruno		}
1971280182Sjfv	}
1972315333Serj} /* ixgbe_rx_checksum */
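/*
 * For reference (illustrative, not compiled): a clean IPv4 TCP receive
 * with both IPCS and L4CS set and no error bits ends up with
 *
 *	csum_flags = CSUM_L3_CALC | CSUM_L3_VALID |
 *	             CSUM_L4_CALC | CSUM_L4_VALID;
 *	csum_data  = htons(0xffff);
 *
 * which the stack accepts as a fully verified checksum.
 */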
1973280182Sjfv
1974315333Serj/************************************************************************
1975315333Serj * ixgbe_dmamap_cb - Manage DMA'able memory.
1976315333Serj ************************************************************************/
1977280182Sjfvstatic void
1978280182Sjfvixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1979280182Sjfv{
1980280182Sjfv	if (error)
1981280182Sjfv		return;
1982315333Serj	*(bus_addr_t *)arg = segs->ds_addr;
1983315333Serj
1984280182Sjfv	return;
1985315333Serj} /* ixgbe_dmamap_cb */
1986280182Sjfv
1987315333Serj/************************************************************************
1988315333Serj * ixgbe_dma_malloc
1989315333Serj ************************************************************************/
1990315333Serjstatic int
1991280182Sjfvixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
1992315333Serj                 struct ixgbe_dma_alloc *dma, int mapflags)
1993280182Sjfv{
1994280182Sjfv	device_t dev = adapter->dev;
1995315333Serj	int      r;
1996280182Sjfv
1997315333Serj	r = bus_dma_tag_create(
1998315333Serj	     /*      parent */ bus_get_dma_tag(adapter->dev),
1999315333Serj	     /*   alignment */ DBA_ALIGN,
2000315333Serj	     /*      bounds */ 0,
2001315333Serj	     /*     lowaddr */ BUS_SPACE_MAXADDR,
2002315333Serj	     /*    highaddr */ BUS_SPACE_MAXADDR,
2003315333Serj	     /*      filter */ NULL,
2004315333Serj	     /*   filterarg */ NULL,
2005315333Serj	     /*     maxsize */ size,
2006315333Serj	     /*   nsegments */ 1,
2007315333Serj	     /*  maxsegsize */ size,
2008315333Serj	     /*       flags */ BUS_DMA_ALLOCNOW,
2009315333Serj	     /*    lockfunc */ NULL,
2010315333Serj	     /* lockfuncarg */ NULL,
2011315333Serj	                       &dma->dma_tag);
2012280182Sjfv	if (r != 0) {
2013315333Serj		device_printf(dev,
2014315333Serj		    "ixgbe_dma_malloc: bus_dma_tag_create failed; error %u\n",
2015315333Serj		    r);
2016280182Sjfv		goto fail_0;
2017280182Sjfv	}
2018280182Sjfv	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2019315333Serj	    BUS_DMA_NOWAIT, &dma->dma_map);
2020280182Sjfv	if (r != 0) {
2021315333Serj		device_printf(dev,
2022315333Serj		    "ixgbe_dma_malloc: bus_dmamem_alloc failed; error %u\n", r);
2023280182Sjfv		goto fail_1;
2024280182Sjfv	}
2025315333Serj	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
2026315333Serj	    ixgbe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2027280182Sjfv	if (r != 0) {
2028315333Serj		device_printf(dev,
2029315333Serj		    "ixgbe_dma_malloc: bus_dmamap_load failed; error %u\n", r);
2030280182Sjfv		goto fail_2;
2031280182Sjfv	}
2032280182Sjfv	dma->dma_size = size;
2033315333Serj
2034280182Sjfv	return (0);
2035280182Sjfvfail_2:
2036280182Sjfv	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2037280182Sjfvfail_1:
2038280182Sjfv	bus_dma_tag_destroy(dma->dma_tag);
2039280182Sjfvfail_0:
2040280182Sjfv	dma->dma_tag = NULL;
2041315333Serj
2042280182Sjfv	return (r);
2043315333Serj} /* ixgbe_dma_malloc */
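/*
 * Illustrative only: callers pair ixgbe_dma_malloc() with
 * ixgbe_dma_free(), as ixgbe_allocate_queues() below does for the
 * descriptor rings:
 *
 *	if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT))
 *		return (ENOMEM);
 *	...
 *	ixgbe_dma_free(adapter, &txr->txdma);
 */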
2044280182Sjfv
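/************************************************************************
 * ixgbe_dma_free
 ************************************************************************/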
2045315333Serjstatic void
2046280182Sjfvixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2047280182Sjfv{
2048280182Sjfv	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2049280182Sjfv	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2050280182Sjfv	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2051280182Sjfv	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2052280182Sjfv	bus_dma_tag_destroy(dma->dma_tag);
2053315333Serj} /* ixgbe_dma_free */
2054280182Sjfv
2055280182Sjfv
2056315333Serj/************************************************************************
2057315333Serj * ixgbe_allocate_queues
2058280182Sjfv *
2059315333Serj *   Allocate memory for the transmit and receive rings, and then
2060315333Serj *   the descriptors associated with each; called only once at attach.
2061315333Serj ************************************************************************/
2062280182Sjfvint
2063280182Sjfvixgbe_allocate_queues(struct adapter *adapter)
2064280182Sjfv{
2065315333Serj	device_t        dev = adapter->dev;
2066315333Serj	struct ix_queue *que;
2067315333Serj	struct tx_ring  *txr;
2068315333Serj	struct rx_ring  *rxr;
2069315333Serj	int             rsize, tsize, error = IXGBE_SUCCESS;
2070315333Serj	int             txconf = 0, rxconf = 0;
2071280182Sjfv
2072315333Serj	/* First, allocate the top level queue structs */
2073315333Serj	adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2074315333Serj	    adapter->num_queues, M_IXGBE, M_NOWAIT | M_ZERO);
2075315333Serj	if (!adapter->queues) {
2076315333Serj		device_printf(dev, "Unable to allocate queue memory\n");
2077315333Serj		error = ENOMEM;
2078315333Serj		goto fail;
2079315333Serj	}
2080280182Sjfv
2081315333Serj	/* Second, allocate the TX ring struct memory */
2082315333Serj	adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
2083315333Serj	    adapter->num_queues, M_IXGBE, M_NOWAIT | M_ZERO);
2084315333Serj	if (!adapter->tx_rings) {
2085280182Sjfv		device_printf(dev, "Unable to allocate TX ring memory\n");
2086280182Sjfv		error = ENOMEM;
2087280182Sjfv		goto tx_fail;
2088280182Sjfv	}
2089280182Sjfv
2090315333Serj	/* Third, allocate the RX ring */
2091315333Serj	adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2092315333Serj	    adapter->num_queues, M_IXGBE, M_NOWAIT | M_ZERO);
2093315333Serj	if (!adapter->rx_rings) {
2094280182Sjfv		device_printf(dev, "Unable to allocate RX ring memory\n");
2095280182Sjfv		error = ENOMEM;
2096280182Sjfv		goto rx_fail;
2097280182Sjfv	}
2098280182Sjfv
2099280182Sjfv	/* For the ring itself */
2100315333Serj	tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2101315333Serj	    DBA_ALIGN);
2102280182Sjfv
2103280182Sjfv	/*
2104280182Sjfv	 * Now set up the TX queues; txconf is needed to handle the
2105280182Sjfv	 * possibility that things fail midcourse and we need to
2106280182Sjfv	 * undo the memory allocations gracefully.
2107315333Serj	 */
2108280182Sjfv	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2109280182Sjfv		/* Set up some basics */
2110280182Sjfv		txr = &adapter->tx_rings[i];
2111280182Sjfv		txr->adapter = adapter;
2112315333Serj		txr->br = NULL;
2113315333Serj		/* In case SR-IOV is enabled, align the index properly */
2114315333Serj		txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2115315333Serj		    i);
2116280182Sjfv		txr->num_desc = adapter->num_tx_desc;
2117280182Sjfv
2118280182Sjfv		/* Initialize the TX side lock */
2119280182Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2120280182Sjfv		    device_get_nameunit(dev), txr->me);
2121280182Sjfv		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2122280182Sjfv
2123315333Serj		if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2124315333Serj		    BUS_DMA_NOWAIT)) {
2125280182Sjfv			device_printf(dev,
2126280182Sjfv			    "Unable to allocate TX Descriptor memory\n");
2127280182Sjfv			error = ENOMEM;
2128280182Sjfv			goto err_tx_desc;
2129280182Sjfv		}
2130280182Sjfv		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2131280182Sjfv		bzero((void *)txr->tx_base, tsize);
2132280182Sjfv
2133315333Serj		/* Now allocate transmit buffers for the ring */
2134315333Serj		if (ixgbe_allocate_transmit_buffers(txr)) {
2135280182Sjfv			device_printf(dev,
2136280182Sjfv			    "Critical Failure setting up transmit buffers\n");
2137280182Sjfv			error = ENOMEM;
2138280182Sjfv			goto err_tx_desc;
2139315333Serj		}
2140315333Serj		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2141315333Serj			/* Allocate a buf ring */
2142315333Serj			txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_IXGBE,
2143315333Serj			    M_WAITOK, &txr->tx_mtx);
2144315333Serj			if (txr->br == NULL) {
2145315333Serj				device_printf(dev,
2146315333Serj				    "Critical Failure setting up buf ring\n");
2147315333Serj				error = ENOMEM;
2148315333Serj				goto err_tx_desc;
2149315333Serj			}
2150315333Serj		}
2151280182Sjfv	}
2152280182Sjfv
2153280182Sjfv	/*
2154280182Sjfv	 * Next the RX queues...
2155315333Serj	 */
2156315333Serj	rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2157315333Serj	    DBA_ALIGN);
2158280182Sjfv	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2159280182Sjfv		rxr = &adapter->rx_rings[i];
2160280182Sjfv		/* Set up some basics */
2161280182Sjfv		rxr->adapter = adapter;
2162315333Serj		/* In case SR-IOV is enabled, align the index properly */
2163315333Serj		rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2164315333Serj		    i);
2165280182Sjfv		rxr->num_desc = adapter->num_rx_desc;
2166280182Sjfv
2167280182Sjfv		/* Initialize the RX side lock */
2168280182Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2169280182Sjfv		    device_get_nameunit(dev), rxr->me);
2170280182Sjfv		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2171280182Sjfv
2172315333Serj		if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2173315333Serj		    BUS_DMA_NOWAIT)) {
2174280182Sjfv			device_printf(dev,
2175280182Sjfv			    "Unable to allocate RX Descriptor memory\n");
2176280182Sjfv			error = ENOMEM;
2177280182Sjfv			goto err_rx_desc;
2178280182Sjfv		}
2179280182Sjfv		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2180280182Sjfv		bzero((void *)rxr->rx_base, rsize);
2181280182Sjfv
2182315333Serj		/* Allocate receive buffers for the ring */
2183280182Sjfv		if (ixgbe_allocate_receive_buffers(rxr)) {
2184280182Sjfv			device_printf(dev,
2185280182Sjfv			    "Critical Failure setting up receive buffers\n");
2186280182Sjfv			error = ENOMEM;
2187280182Sjfv			goto err_rx_desc;
2188280182Sjfv		}
2189280182Sjfv	}
2190280182Sjfv
2191280182Sjfv	/*
2192315333Serj	 * Finally set up the queue holding structs
2193315333Serj	 */
2194280182Sjfv	for (int i = 0; i < adapter->num_queues; i++) {
2195280182Sjfv		que = &adapter->queues[i];
2196280182Sjfv		que->adapter = adapter;
2197280182Sjfv		que->me = i;
2198280182Sjfv		que->txr = &adapter->tx_rings[i];
2199280182Sjfv		que->rxr = &adapter->rx_rings[i];
2200280182Sjfv	}
2201280182Sjfv
2202280182Sjfv	return (0);
2203280182Sjfv
2204280182Sjfverr_rx_desc:
2205280182Sjfv	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2206280182Sjfv		ixgbe_dma_free(adapter, &rxr->rxdma);
2207280182Sjfverr_tx_desc:
2208280182Sjfv	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2209280182Sjfv		ixgbe_dma_free(adapter, &txr->txdma);
2210315333Serj	free(adapter->rx_rings, M_IXGBE);
2211280182Sjfvrx_fail:
2212315333Serj	free(adapter->tx_rings, M_IXGBE);
2213280182Sjfvtx_fail:
2214315333Serj	free(adapter->queues, M_IXGBE);
2215280182Sjfvfail:
2216280182Sjfv	return (error);
2217315333Serj} /* ixgbe_allocate_queues */
2218