Deleted: ix_txrx.c (302408)        Added: ix_txrx.c (320897)
1/******************************************************************************
2
1/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
18 this software without specific prior written permission.
19
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/ix_txrx.c 301538 2016-06-07 04:51:50Z sephe $*/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/ix_txrx.c 320897 2017-07-11 21:25:07Z erj $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
40#endif
41
42#include "ixgbe.h"
43
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
40#endif
41
42#include "ixgbe.h"
43
44#ifdef RSS
45#include <net/rss_config.h>
46#include <netinet/in_rss.h>
47#endif
48
49#ifdef DEV_NETMAP
50#include <net/netmap.h>
51#include <sys/selinfo.h>
52#include <dev/netmap/netmap_kern.h>
53
54extern int ix_crcstrip;
55#endif
56
57/*
44/*
58** HW RSC control:
59** this feature only works with
60** IPv4, and only on 82599 and later.
61** Also this will cause IP forwarding to
62** fail and that can't be controlled by
63** the stack as LRO can. For all these
64** reasons I've deemed it best to leave
65** this off and not bother with a tuneable
66** interface, this would need to be compiled
67** to enable.
68*/
45 * HW RSC control:
46 * this feature only works with
47 * IPv4, and only on 82599 and later.
48 * Also this will cause IP forwarding to
49 * fail and that can't be controlled by
50 * the stack as LRO can. For all these
51 * reasons I've deemed it best to leave
52 * this off and not bother with a tuneable
53 * interface, this would need to be compiled
54 * to enable.
55 */
69static bool ixgbe_rsc_enable = FALSE;
70
56static bool ixgbe_rsc_enable = FALSE;
57
71#ifdef IXGBE_FDIR
72/*
58/*
73** For Flow Director: this is the
74** number of TX packets we sample
75** for the filter pool, this means
76** every 20th packet will be probed.
77**
78** This feature can be disabled by
79** setting this to 0.
80*/
59 * For Flow Director: this is the
60 * number of TX packets we sample
61 * for the filter pool, this means
62 * every 20th packet will be probed.
63 *
64 * This feature can be disabled by
65 * setting this to 0.
66 */
81static int atr_sample_rate = 20;
67static int atr_sample_rate = 20;
82#endif
83
68
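The sample rate above is consumed later in ixgbe_xmit(), where a per-ring counter gates calls into the ATR filter code so that only every atr_sample_rate-th transmitted packet is probed, and a rate of 0 disables the feature. A minimal standalone sketch of that pattern follows; the names are illustrative, not driver code.

#include <stdio.h>

struct ring_stub {
        int atr_sample;         /* sampling rate, 0 disables probing */
        int atr_count;          /* packets seen since the last probe */
};

static void
probe_packet(int pktnum)
{
        printf("probing packet %d for the filter pool\n", pktnum);
}

static void
tx_one(struct ring_stub *txr, int pktnum)
{
        if (txr->atr_sample == 0)       /* rate 0: feature disabled */
                return;
        if (++txr->atr_count >= txr->atr_sample) {
                probe_packet(pktnum);
                txr->atr_count = 0;
        }
}

int
main(void)
{
        struct ring_stub txr = { .atr_sample = 20, .atr_count = 0 };

        for (int i = 1; i <= 60; i++)
                tx_one(&txr, i);        /* probes packets 20, 40 and 60 */
        return (0);
}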
84/*********************************************************************
69/************************************************************************
85 * Local Function prototypes
70 * Local Function prototypes
86 *********************************************************************/
87static void ixgbe_setup_transmit_ring(struct tx_ring *);
88static void ixgbe_free_transmit_buffers(struct tx_ring *);
89static int ixgbe_setup_receive_ring(struct rx_ring *);
90static void ixgbe_free_receive_buffers(struct rx_ring *);
91
92static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
93static void ixgbe_refresh_mbufs(struct rx_ring *, int);
94static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
95static int ixgbe_tx_ctx_setup(struct tx_ring *,
96 struct mbuf *, u32 *, u32 *);
97static int ixgbe_tso_setup(struct tx_ring *,
98 struct mbuf *, u32 *, u32 *);
99#ifdef IXGBE_FDIR
100static void ixgbe_atr(struct tx_ring *, struct mbuf *);
101#endif
71 ************************************************************************/
72static void ixgbe_setup_transmit_ring(struct tx_ring *);
73static void ixgbe_free_transmit_buffers(struct tx_ring *);
74static int ixgbe_setup_receive_ring(struct rx_ring *);
75static void ixgbe_free_receive_buffers(struct rx_ring *);
76static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
77static void ixgbe_refresh_mbufs(struct rx_ring *, int);
78static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
79static int ixgbe_tx_ctx_setup(struct tx_ring *,
80 struct mbuf *, u32 *, u32 *);
81static int ixgbe_tso_setup(struct tx_ring *,
82 struct mbuf *, u32 *, u32 *);
102static __inline void ixgbe_rx_discard(struct rx_ring *, int);
103static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
83static __inline void ixgbe_rx_discard(struct rx_ring *, int);
84static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
104 struct mbuf *, u32);
85 struct mbuf *, u32);
86static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
87 struct ixgbe_dma_alloc *, int);
88static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
105
89
106#ifdef IXGBE_LEGACY_TX
107/*********************************************************************
108 * Transmit entry point
90/************************************************************************
91 * ixgbe_legacy_start_locked - Transmit entry point
109 *
92 *
110 * ixgbe_start is called by the stack to initiate a transmit.
111 * The driver will remain in this routine as long as there are
112 * packets to transmit and transmit resources are available.
113 * In case resources are not available stack is notified and
114 * the packet is requeued.
115 **********************************************************************/
116
117void
118ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
93 * Called by the stack to initiate a transmit.
94 * The driver will remain in this routine as long as there are
95 * packets to transmit and transmit resources are available.
96 * In case resources are not available, the stack is notified
97 * and the packet is requeued.
98 ************************************************************************/
99int
100ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
119{
120 struct mbuf *m_head;
121 struct adapter *adapter = txr->adapter;
122
123 IXGBE_TX_LOCK_ASSERT(txr);
124
125 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
101{
102 struct mbuf *m_head;
103 struct adapter *adapter = txr->adapter;
104
105 IXGBE_TX_LOCK_ASSERT(txr);
106
107 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
126 return;
108 return (ENETDOWN);
127 if (!adapter->link_active)
109 if (!adapter->link_active)
128 return;
110 return (ENETDOWN);
129
130 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
131 if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
132 break;
133
134 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
135 if (m_head == NULL)
136 break;
137
138 if (ixgbe_xmit(txr, &m_head)) {
139 if (m_head != NULL)
140 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
141 break;
142 }
143 /* Send a copy of the frame to the BPF listener */
144 ETHER_BPF_MTAP(ifp, m_head);
145 }
111
112 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
113 if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
114 break;
115
116 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
117 if (m_head == NULL)
118 break;
119
120 if (ixgbe_xmit(txr, &m_head)) {
121 if (m_head != NULL)
122 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
123 break;
124 }
125 /* Send a copy of the frame to the BPF listener */
126 ETHER_BPF_MTAP(ifp, m_head);
127 }
146 return;
147}
148
128
149/*
150 * Legacy TX start - called by the stack, this
151 * always uses the first tx ring, and should
152 * not be used with multiqueue tx enabled.
153 */
129 return IXGBE_SUCCESS;
130} /* ixgbe_legacy_start_locked */
131
132/************************************************************************
133 * ixgbe_legacy_start
134 *
135 * Called by the stack, this always uses the first tx ring,
136 * and should not be used with multiqueue tx enabled.
137 ************************************************************************/
154void
138void
155ixgbe_start(struct ifnet *ifp)
139ixgbe_legacy_start(struct ifnet *ifp)
156{
157 struct adapter *adapter = ifp->if_softc;
140{
141 struct adapter *adapter = ifp->if_softc;
158 struct tx_ring *txr = adapter->tx_rings;
142 struct tx_ring *txr = adapter->tx_rings;
159
160 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
161 IXGBE_TX_LOCK(txr);
143
144 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
145 IXGBE_TX_LOCK(txr);
162 ixgbe_start_locked(txr, ifp);
146 ixgbe_legacy_start_locked(ifp, txr);
163 IXGBE_TX_UNLOCK(txr);
164 }
147 IXGBE_TX_UNLOCK(txr);
148 }
165 return;
166}
149} /* ixgbe_legacy_start */
167
150
168#else /* ! IXGBE_LEGACY_TX */
169
170/*
171** Multiqueue Transmit Entry Point
172** (if_transmit function)
173*/
151/************************************************************************
152 * ixgbe_mq_start - Multiqueue Transmit Entry Point
153 *
154 * (if_transmit function)
155 ************************************************************************/
174int
175ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
176{
156int
157ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
158{
177 struct adapter *adapter = ifp->if_softc;
178 struct ix_queue *que;
179 struct tx_ring *txr;
180 int i, err = 0;
181#ifdef RSS
182 uint32_t bucket_id;
183#endif
159 struct adapter *adapter = ifp->if_softc;
160 struct ix_queue *que;
161 struct tx_ring *txr;
162 int i, err = 0;
163 uint32_t bucket_id;
184
185 /*
186 * When doing RSS, map it to the same outbound queue
187 * as the incoming flow would be mapped to.
188 *
189 * If everything is setup correctly, it should be the
190 * same bucket that the current CPU we're on is.
191 */
192 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
164
165 /*
166 * When doing RSS, map it to the same outbound queue
167 * as the incoming flow would be mapped to.
168 *
169 * If everything is setup correctly, it should be the
170 * same bucket that the current CPU we're on is.
171 */
172 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
193#ifdef RSS
194 if (rss_hash2bucket(m->m_pkthdr.flowid,
195 M_HASHTYPE_GET(m), &bucket_id) == 0) {
173 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
174 (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
175 &bucket_id) == 0)) {
196 i = bucket_id % adapter->num_queues;
197#ifdef IXGBE_DEBUG
198 if (bucket_id > adapter->num_queues)
176 i = bucket_id % adapter->num_queues;
177#ifdef IXGBE_DEBUG
178 if (bucket_id > adapter->num_queues)
199 if_printf(ifp, "bucket_id (%d) > num_queues "
200 "(%d)\n", bucket_id, adapter->num_queues);
179 if_printf(ifp,
180 "bucket_id (%d) > num_queues (%d)\n",
181 bucket_id, adapter->num_queues);
201#endif
182#endif
202 } else
203#endif
183 } else
204 i = m->m_pkthdr.flowid % adapter->num_queues;
205 } else
206 i = curcpu % adapter->num_queues;
207
208 /* Check for a hung queue and pick alternative */
209 if (((1 << i) & adapter->active_queues) == 0)
210 i = ffsl(adapter->active_queues);
211

--- 5 unchanged lines hidden ---

217 return (err);
218 if (IXGBE_TX_TRYLOCK(txr)) {
219 ixgbe_mq_start_locked(ifp, txr);
220 IXGBE_TX_UNLOCK(txr);
221 } else
222 taskqueue_enqueue(que->tq, &txr->txq_task);
223
224 return (0);
184 i = m->m_pkthdr.flowid % adapter->num_queues;
185 } else
186 i = curcpu % adapter->num_queues;
187
188 /* Check for a hung queue and pick alternative */
189 if (((1 << i) & adapter->active_queues) == 0)
190 i = ffsl(adapter->active_queues);
191

--- 5 unchanged lines hidden ---

197 return (err);
198 if (IXGBE_TX_TRYLOCK(txr)) {
199 ixgbe_mq_start_locked(ifp, txr);
200 IXGBE_TX_UNLOCK(txr);
201 } else
202 taskqueue_enqueue(que->tq, &txr->txq_task);
203
204 return (0);
225}
205} /* ixgbe_mq_start */
226
206
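The queue-selection logic in ixgbe_mq_start() above prefers the RSS bucket of the incoming flow, falls back to flowid or the current CPU modulo the queue count, and steers away from any queue not marked in active_queues. A minimal standalone sketch of that policy follows, with made-up names and a simple bit scan standing in for ffsl().

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define NUM_QUEUES 4

/* Smallest set bit, 0-based; -1 if the mask is empty. */
static int
first_active(uint64_t mask)
{
        for (int bit = 0; bit < 64; bit++)
                if (mask & (1ULL << bit))
                        return (bit);
        return (-1);
}

static int
pick_tx_queue(bool have_hash, uint32_t bucket_or_flowid, int curcpu,
    uint64_t active_queues)
{
        int i;

        if (have_hash)
                i = bucket_or_flowid % NUM_QUEUES; /* RSS bucket or flowid */
        else
                i = curcpu % NUM_QUEUES;           /* no hash: spread by CPU */

        /* If that queue looks inactive, fall back to any active one. */
        if (((1ULL << i) & active_queues) == 0) {
                int alt = first_active(active_queues);
                if (alt >= 0)
                        i = alt;
        }
        return (i);
}

int
main(void)
{
        uint64_t active = 0xb;          /* queues 0, 1 and 3 are active */

        printf("flow 7  -> queue %d\n", pick_tx_queue(true, 7, 0, active));
        printf("no hash -> queue %d\n", pick_tx_queue(false, 0, 2, active));
        return (0);
}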
207/************************************************************************
208 * ixgbe_mq_start_locked
209 ************************************************************************/
227int
228ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
229{
210int
211ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
212{
230 struct adapter *adapter = txr->adapter;
231 struct mbuf *next;
232 int enqueued = 0, err = 0;
213 struct mbuf *next;
214 int enqueued = 0, err = 0;
233
215
234 if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
235 adapter->link_active == 0)
216 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
236 return (ENETDOWN);
217 return (ENETDOWN);
218 if (txr->adapter->link_active == 0)
219 return (ENETDOWN);
237
238 /* Process the queue */
239#if __FreeBSD_version < 901504
240 next = drbr_dequeue(ifp, txr->br);
241 while (next != NULL) {
242 if ((err = ixgbe_xmit(txr, &next)) != 0) {
243 if (next != NULL)
244 err = drbr_enqueue(ifp, txr->br, next);
245#else
246 while ((next = drbr_peek(ifp, txr->br)) != NULL) {
220
221 /* Process the queue */
222#if __FreeBSD_version < 901504
223 next = drbr_dequeue(ifp, txr->br);
224 while (next != NULL) {
225 if ((err = ixgbe_xmit(txr, &next)) != 0) {
226 if (next != NULL)
227 err = drbr_enqueue(ifp, txr->br, next);
228#else
229 while ((next = drbr_peek(ifp, txr->br)) != NULL) {
247 if ((err = ixgbe_xmit(txr, &next)) != 0) {
248 if (next == NULL) {
230 err = ixgbe_xmit(txr, &next);
231 if (err != 0) {
232 if (next == NULL)
249 drbr_advance(ifp, txr->br);
233 drbr_advance(ifp, txr->br);
250 } else {
234 else
251 drbr_putback(ifp, txr->br, next);
235 drbr_putback(ifp, txr->br, next);
252 }
253#endif
254 break;
255 }
256#if __FreeBSD_version >= 901504
257 drbr_advance(ifp, txr->br);
258#endif
259 enqueued++;
236#endif
237 break;
238 }
239#if __FreeBSD_version >= 901504
240 drbr_advance(ifp, txr->br);
241#endif
242 enqueued++;
260#if 0 // this is VF-only
261#if __FreeBSD_version >= 1100036
262 /*
263 * Since we're looking at the tx ring, we can check
264 * to see if we're a VF by examing our tail register
265 * address.
266 */
243#if __FreeBSD_version >= 1100036
244 /*
245 * Since we're looking at the tx ring, we can check
246 * to see if we're a VF by examing our tail register
247 * address.
248 */
267 if (txr->tail < IXGBE_TDT(0) && next->m_flags & M_MCAST)
249 if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
250 (next->m_flags & M_MCAST))
268 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
269#endif
251 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
252#endif
270#endif
271 /* Send a copy of the frame to the BPF listener */
272 ETHER_BPF_MTAP(ifp, next);
273 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
274 break;
275#if __FreeBSD_version < 901504
276 next = drbr_dequeue(ifp, txr->br);
277#endif
278 }
279
253 /* Send a copy of the frame to the BPF listener */
254 ETHER_BPF_MTAP(ifp, next);
255 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
256 break;
257#if __FreeBSD_version < 901504
258 next = drbr_dequeue(ifp, txr->br);
259#endif
260 }
261
280 if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
262 if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
281 ixgbe_txeof(txr);
282
283 return (err);
263 ixgbe_txeof(txr);
264
265 return (err);
284}
266} /* ixgbe_mq_start_locked */
285
267
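The locked multiqueue path above drains the per-ring software queue with drbr_peek()/drbr_advance()/drbr_putback(): look at the head, try to hand it to the hardware, then either consume it or put the possibly-modified packet back and stop. The following is a toy standalone model of that pattern, not the kernel buf_ring(9) API.

#include <stdio.h>

#define RING_SIZE 8

struct pkt { int id; };

struct swring {
        struct pkt *slot[RING_SIZE];
        int head, tail;                 /* consume at head, produce at tail */
};

static struct pkt *
ring_peek(struct swring *r)             /* look at the head, don't consume */
{
        return (r->head == r->tail ? NULL : r->slot[r->head % RING_SIZE]);
}

static void
ring_advance(struct swring *r)          /* the peeked packet was sent */
{
        r->head++;
}

static void
ring_putback(struct swring *r, struct pkt *p)   /* keep it for later */
{
        r->slot[r->head % RING_SIZE] = p;
}

int
main(void)
{
        struct swring r = { .head = 0, .tail = 0 };
        struct pkt pkts[4] = { {1}, {2}, {3}, {4} };
        int descs_left = 2;             /* pretend hardware room for 2 frames */
        struct pkt *next;

        for (int i = 0; i < 4; i++)
                r.slot[r.tail++ % RING_SIZE] = &pkts[i];

        while ((next = ring_peek(&r)) != NULL) {
                if (descs_left == 0) {          /* "xmit" failed */
                        ring_putback(&r, next); /* leave it at the head */
                        break;
                }
                descs_left--;
                printf("sent packet %d\n", next->id);
                ring_advance(&r);
        }
        printf("%d packet(s) still queued\n", r.tail - r.head);
        return (0);
}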
286/*
287 * Called from a taskqueue to drain queued transmit packets.
288 */
268/************************************************************************
269 * ixgbe_deferred_mq_start
270 *
271 * Called from a taskqueue to drain queued transmit packets.
272 ************************************************************************/
289void
290ixgbe_deferred_mq_start(void *arg, int pending)
291{
292 struct tx_ring *txr = arg;
293 struct adapter *adapter = txr->adapter;
273void
274ixgbe_deferred_mq_start(void *arg, int pending)
275{
276 struct tx_ring *txr = arg;
277 struct adapter *adapter = txr->adapter;
294 struct ifnet *ifp = adapter->ifp;
278 struct ifnet *ifp = adapter->ifp;
295
296 IXGBE_TX_LOCK(txr);
297 if (!drbr_empty(ifp, txr->br))
298 ixgbe_mq_start_locked(ifp, txr);
299 IXGBE_TX_UNLOCK(txr);
279
280 IXGBE_TX_LOCK(txr);
281 if (!drbr_empty(ifp, txr->br))
282 ixgbe_mq_start_locked(ifp, txr);
283 IXGBE_TX_UNLOCK(txr);
300}
284} /* ixgbe_deferred_mq_start */
301
285
302/*
303 * Flush all ring buffers
304 */
286/************************************************************************
287 * ixgbe_qflush - Flush all ring buffers
288 ************************************************************************/
305void
306ixgbe_qflush(struct ifnet *ifp)
307{
289void
290ixgbe_qflush(struct ifnet *ifp)
291{
308 struct adapter *adapter = ifp->if_softc;
309 struct tx_ring *txr = adapter->tx_rings;
310 struct mbuf *m;
292 struct adapter *adapter = ifp->if_softc;
293 struct tx_ring *txr = adapter->tx_rings;
294 struct mbuf *m;
311
312 for (int i = 0; i < adapter->num_queues; i++, txr++) {
313 IXGBE_TX_LOCK(txr);
314 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
315 m_freem(m);
316 IXGBE_TX_UNLOCK(txr);
317 }
318 if_qflush(ifp);
295
296 for (int i = 0; i < adapter->num_queues; i++, txr++) {
297 IXGBE_TX_LOCK(txr);
298 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
299 m_freem(m);
300 IXGBE_TX_UNLOCK(txr);
301 }
302 if_qflush(ifp);
319}
320#endif /* IXGBE_LEGACY_TX */
303} /* ixgbe_qflush */
321
322
304
305
323/*********************************************************************
306/************************************************************************
307 * ixgbe_xmit
324 *
308 *
325 * This routine maps the mbufs to tx descriptors, allowing the
326 * TX engine to transmit the packets.
327 * - return 0 on success, positive on failure
309 * Maps the mbufs to tx descriptors, allowing the
310 * TX engine to transmit the packets.
328 *
311 *
329 **********************************************************************/
330
312 * Return 0 on success, positive on failure
313 ************************************************************************/
331static int
332ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
333{
314static int
315ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
316{
334 struct adapter *adapter = txr->adapter;
335 u32 olinfo_status = 0, cmd_type_len;
336 int i, j, error, nsegs;
337 int first;
338 bool remap = TRUE;
339 struct mbuf *m_head;
340 bus_dma_segment_t segs[adapter->num_segs];
341 bus_dmamap_t map;
342 struct ixgbe_tx_buf *txbuf;
317 struct adapter *adapter = txr->adapter;
318 struct ixgbe_tx_buf *txbuf;
343 union ixgbe_adv_tx_desc *txd = NULL;
319 union ixgbe_adv_tx_desc *txd = NULL;
320 struct mbuf *m_head;
321 int i, j, error, nsegs;
322 int first;
323 u32 olinfo_status = 0, cmd_type_len;
324 bool remap = TRUE;
325 bus_dma_segment_t segs[adapter->num_segs];
326 bus_dmamap_t map;
344
345 m_head = *m_headp;
346
347 /* Basic descriptor defines */
327
328 m_head = *m_headp;
329
330 /* Basic descriptor defines */
348 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
331 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
349 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
350
351 if (m_head->m_flags & M_VLANTAG)
332 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
333
334 if (m_head->m_flags & M_VLANTAG)
352 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
335 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
353
336
354 /*
355 * Important to capture the first descriptor
356 * used because it will contain the index of
357 * the one we tell the hardware to report back
358 */
359 first = txr->next_avail_desc;
337 /*
338 * Important to capture the first descriptor
339 * used because it will contain the index of
340 * the one we tell the hardware to report back
341 */
342 first = txr->next_avail_desc;
360 txbuf = &txr->tx_buffers[first];
361 map = txbuf->map;
362
363 /*
364 * Map the packet for DMA.
365 */
366retry:
343 txbuf = &txr->tx_buffers[first];
344 map = txbuf->map;
345
346 /*
347 * Map the packet for DMA.
348 */
349retry:
367 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
368 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
350 error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs,
351 &nsegs, BUS_DMA_NOWAIT);
369
370 if (__predict_false(error)) {
371 struct mbuf *m;
372
373 switch (error) {
374 case EFBIG:
375 /* Try it again? - one try */
376 if (remap == TRUE) {

--- 38 unchanged lines hidden ---

415 */
416 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
417 if (__predict_false(error)) {
418 if (error == ENOBUFS)
419 *m_headp = NULL;
420 return (error);
421 }
422
352
353 if (__predict_false(error)) {
354 struct mbuf *m;
355
356 switch (error) {
357 case EFBIG:
358 /* Try it again? - one try */
359 if (remap == TRUE) {

--- 38 unchanged lines hidden ---

398 */
399 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
400 if (__predict_false(error)) {
401 if (error == ENOBUFS)
402 *m_headp = NULL;
403 return (error);
404 }
405
423#ifdef IXGBE_FDIR
424 /* Do the flow director magic */
406 /* Do the flow director magic */
425 if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
407 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
408 (txr->atr_sample) && (!adapter->fdir_reinit)) {
426 ++txr->atr_count;
427 if (txr->atr_count >= atr_sample_rate) {
428 ixgbe_atr(txr, m_head);
429 txr->atr_count = 0;
430 }
431 }
409 ++txr->atr_count;
410 if (txr->atr_count >= atr_sample_rate) {
411 ixgbe_atr(txr, m_head);
412 txr->atr_count = 0;
413 }
414 }
432#endif
433
434 olinfo_status |= IXGBE_ADVTXD_CC;
435 i = txr->next_avail_desc;
436 for (j = 0; j < nsegs; j++) {
437 bus_size_t seglen;
438 bus_addr_t segaddr;
439
440 txbuf = &txr->tx_buffers[i];
441 txd = &txr->tx_base[i];
442 seglen = segs[j].ds_len;
443 segaddr = htole64(segs[j].ds_addr);
444
445 txd->read.buffer_addr = segaddr;
446 txd->read.cmd_type_len = htole32(txr->txd_cmd |
415
416 olinfo_status |= IXGBE_ADVTXD_CC;
417 i = txr->next_avail_desc;
418 for (j = 0; j < nsegs; j++) {
419 bus_size_t seglen;
420 bus_addr_t segaddr;
421
422 txbuf = &txr->tx_buffers[i];
423 txd = &txr->tx_base[i];
424 seglen = segs[j].ds_len;
425 segaddr = htole64(segs[j].ds_addr);
426
427 txd->read.buffer_addr = segaddr;
428 txd->read.cmd_type_len = htole32(txr->txd_cmd |
447 cmd_type_len |seglen);
429 cmd_type_len | seglen);
448 txd->read.olinfo_status = htole32(olinfo_status);
449
450 if (++i == txr->num_desc)
451 i = 0;
452 }
453
430 txd->read.olinfo_status = htole32(olinfo_status);
431
432 if (++i == txr->num_desc)
433 i = 0;
434 }
435
454 txd->read.cmd_type_len |=
455 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
436 txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
456 txr->tx_avail -= nsegs;
457 txr->next_avail_desc = i;
458
459 txbuf->m_head = m_head;
460 /*
461 * Here we swap the map so the last descriptor,
462 * which gets the completion interrupt has the
463 * real map, and the first descriptor gets the
464 * unused map from this descriptor.
465 */
466 txr->tx_buffers[first].map = txbuf->map;
467 txbuf->map = map;
468 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
469
437 txr->tx_avail -= nsegs;
438 txr->next_avail_desc = i;
439
440 txbuf->m_head = m_head;
441 /*
442 * Here we swap the map so the last descriptor,
443 * which gets the completion interrupt has the
444 * real map, and the first descriptor gets the
445 * unused map from this descriptor.
446 */
447 txr->tx_buffers[first].map = txbuf->map;
448 txbuf->map = map;
449 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
450
470 /* Set the EOP descriptor that will be marked done */
471 txbuf = &txr->tx_buffers[first];
451 /* Set the EOP descriptor that will be marked done */
452 txbuf = &txr->tx_buffers[first];
472 txbuf->eop = txd;
473
453 txbuf->eop = txd;
454
474 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
475 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
455 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
456 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
476 /*
477 * Advance the Transmit Descriptor Tail (Tdt), this tells the
478 * hardware that this frame is available to transmit.
479 */
480 ++txr->total_packets;
481 IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
482
483 /* Mark queue as having work */
484 if (txr->busy == 0)
485 txr->busy = 1;
486
487 return (0);
457 /*
458 * Advance the Transmit Descriptor Tail (Tdt), this tells the
459 * hardware that this frame is available to transmit.
460 */
461 ++txr->total_packets;
462 IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
463
464 /* Mark queue as having work */
465 if (txr->busy == 0)
466 txr->busy = 1;
467
468 return (0);
488}
469} /* ixgbe_xmit */
489
490
470
471
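ixgbe_xmit() above writes one advanced descriptor per DMA segment, wraps the index at the ring size, charges tx_avail, and remembers the last slot written as the end-of-packet (EOP) descriptor. A tiny standalone sketch of that index arithmetic, with illustrative values only:

#include <stdio.h>

#define NUM_DESC 8

int
main(void)
{
        int next_avail = 6;     /* pretend we are near the end of the ring */
        int tx_avail = 5;
        int nsegs = 3;          /* the frame was DMA-mapped into 3 segments */
        int eop = -1;

        for (int j = 0; j < nsegs; j++) {
                printf("segment %d -> descriptor %d\n", j, next_avail);
                eop = next_avail;               /* last one written is EOP */
                if (++next_avail == NUM_DESC)   /* wrap like the HW ring */
                        next_avail = 0;
        }
        tx_avail -= nsegs;

        printf("EOP descriptor %d, next_avail %d, tx_avail %d\n",
            eop, next_avail, tx_avail);
        return (0);
}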
491/*********************************************************************
472/************************************************************************
473 * ixgbe_allocate_transmit_buffers
492 *
474 *
493 * Allocate memory for tx_buffer structures. The tx_buffer stores all
494 * the information needed to transmit a packet on the wire. This is
495 * called only once at attach, setup is done every reset.
496 *
497 **********************************************************************/
498int
475 * Allocate memory for tx_buffer structures. The tx_buffer stores all
476 * the information needed to transmit a packet on the wire. This is
477 * called only once at attach, setup is done every reset.
478 ************************************************************************/
479static int
499ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
500{
480ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
481{
501 struct adapter *adapter = txr->adapter;
502 device_t dev = adapter->dev;
482 struct adapter *adapter = txr->adapter;
483 device_t dev = adapter->dev;
503 struct ixgbe_tx_buf *txbuf;
484 struct ixgbe_tx_buf *txbuf;
504 int error, i;
485 int error, i;
505
506 /*
507 * Setup DMA descriptor areas.
508 */
486
487 /*
488 * Setup DMA descriptor areas.
489 */
509 if ((error = bus_dma_tag_create(
510 bus_get_dma_tag(adapter->dev), /* parent */
511 1, 0, /* alignment, bounds */
512 BUS_SPACE_MAXADDR, /* lowaddr */
513 BUS_SPACE_MAXADDR, /* highaddr */
514 NULL, NULL, /* filter, filterarg */
515 IXGBE_TSO_SIZE, /* maxsize */
516 adapter->num_segs, /* nsegments */
517 PAGE_SIZE, /* maxsegsize */
518 0, /* flags */
519 NULL, /* lockfunc */
520 NULL, /* lockfuncarg */
521 &txr->txtag))) {
522 device_printf(dev,"Unable to allocate TX DMA tag\n");
490 error = bus_dma_tag_create(
491 /* parent */ bus_get_dma_tag(adapter->dev),
492 /* alignment */ 1,
493 /* bounds */ 0,
494 /* lowaddr */ BUS_SPACE_MAXADDR,
495 /* highaddr */ BUS_SPACE_MAXADDR,
496 /* filter */ NULL,
497 /* filterarg */ NULL,
498 /* maxsize */ IXGBE_TSO_SIZE,
499 /* nsegments */ adapter->num_segs,
500 /* maxsegsize */ PAGE_SIZE,
501 /* flags */ 0,
502 /* lockfunc */ NULL,
503 /* lockfuncarg */ NULL,
504 &txr->txtag);
505 if (error != 0) {
506 device_printf(dev, "Unable to allocate TX DMA tag\n");
523 goto fail;
524 }
525
507 goto fail;
508 }
509
526 if (!(txr->tx_buffers =
527 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
528 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
510 txr->tx_buffers =
511 (struct ixgbe_tx_buf *)malloc(sizeof(struct ixgbe_tx_buf) *
512 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
513 if (txr->tx_buffers == NULL) {
529 device_printf(dev, "Unable to allocate tx_buffer memory\n");
530 error = ENOMEM;
531 goto fail;
532 }
533
514 device_printf(dev, "Unable to allocate tx_buffer memory\n");
515 error = ENOMEM;
516 goto fail;
517 }
518
534 /* Create the descriptor buffer dma maps */
519 /* Create the descriptor buffer dma maps */
535 txbuf = txr->tx_buffers;
536 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
537 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
538 if (error != 0) {
539 device_printf(dev, "Unable to create TX DMA map\n");
540 goto fail;
541 }
542 }
543
544 return 0;
545fail:
546 /* We free all, it handles case where we are in the middle */
547 ixgbe_free_transmit_structures(adapter);
520 txbuf = txr->tx_buffers;
521 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
522 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
523 if (error != 0) {
524 device_printf(dev, "Unable to create TX DMA map\n");
525 goto fail;
526 }
527 }
528
529 return 0;
530fail:
531 /* We free all, it handles case where we are in the middle */
532 ixgbe_free_transmit_structures(adapter);
533
548 return (error);
534 return (error);
549}
535} /* ixgbe_allocate_transmit_buffers */
550
536
551/*********************************************************************
552 *
553 * Initialize a transmit ring.
554 *
555 **********************************************************************/
537/************************************************************************
538 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
539 ************************************************************************/
556static void
557ixgbe_setup_transmit_ring(struct tx_ring *txr)
558{
540static void
541ixgbe_setup_transmit_ring(struct tx_ring *txr)
542{
559 struct adapter *adapter = txr->adapter;
560 struct ixgbe_tx_buf *txbuf;
543 struct adapter *adapter = txr->adapter;
544 struct ixgbe_tx_buf *txbuf;
561#ifdef DEV_NETMAP
562 struct netmap_adapter *na = NA(adapter->ifp);
545#ifdef DEV_NETMAP
546 struct netmap_adapter *na = NA(adapter->ifp);
563 struct netmap_slot *slot;
547 struct netmap_slot *slot;
564#endif /* DEV_NETMAP */
565
566 /* Clear the old ring contents */
567 IXGBE_TX_LOCK(txr);
548#endif /* DEV_NETMAP */
549
550 /* Clear the old ring contents */
551 IXGBE_TX_LOCK(txr);
552
568#ifdef DEV_NETMAP
553#ifdef DEV_NETMAP
569 /*
570 * (under lock): if in netmap mode, do some consistency
571 * checks and set slot to entry 0 of the netmap ring.
572 */
573 slot = netmap_reset(na, NR_TX, txr->me, 0);
554 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
555 /*
556 * (under lock): if in netmap mode, do some consistency
557 * checks and set slot to entry 0 of the netmap ring.
558 */
559 slot = netmap_reset(na, NR_TX, txr->me, 0);
560 }
574#endif /* DEV_NETMAP */
561#endif /* DEV_NETMAP */
562
575 bzero((void *)txr->tx_base,
563 bzero((void *)txr->tx_base,
576 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
564 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
577 /* Reset indices */
578 txr->next_avail_desc = 0;
579 txr->next_to_clean = 0;
580
581 /* Free any existing tx buffers. */
565 /* Reset indices */
566 txr->next_avail_desc = 0;
567 txr->next_to_clean = 0;
568
569 /* Free any existing tx buffers. */
582 txbuf = txr->tx_buffers;
570 txbuf = txr->tx_buffers;
583 for (int i = 0; i < txr->num_desc; i++, txbuf++) {
584 if (txbuf->m_head != NULL) {
585 bus_dmamap_sync(txr->txtag, txbuf->map,
586 BUS_DMASYNC_POSTWRITE);
587 bus_dmamap_unload(txr->txtag, txbuf->map);
588 m_freem(txbuf->m_head);
589 txbuf->m_head = NULL;
590 }
571 for (int i = 0; i < txr->num_desc; i++, txbuf++) {
572 if (txbuf->m_head != NULL) {
573 bus_dmamap_sync(txr->txtag, txbuf->map,
574 BUS_DMASYNC_POSTWRITE);
575 bus_dmamap_unload(txr->txtag, txbuf->map);
576 m_freem(txbuf->m_head);
577 txbuf->m_head = NULL;
578 }
579
591#ifdef DEV_NETMAP
592 /*
593 * In netmap mode, set the map for the packet buffer.
594 * NOTE: Some drivers (not this one) also need to set
595 * the physical buffer address in the NIC ring.
596 * Slots in the netmap ring (indexed by "si") are
597 * kring->nkr_hwofs positions "ahead" wrt the
598 * corresponding slot in the NIC ring. In some drivers
599 * (not here) nkr_hwofs can be negative. Function
600 * netmap_idx_n2k() handles wraparounds properly.
601 */
580#ifdef DEV_NETMAP
581 /*
582 * In netmap mode, set the map for the packet buffer.
583 * NOTE: Some drivers (not this one) also need to set
584 * the physical buffer address in the NIC ring.
585 * Slots in the netmap ring (indexed by "si") are
586 * kring->nkr_hwofs positions "ahead" wrt the
587 * corresponding slot in the NIC ring. In some drivers
588 * (not here) nkr_hwofs can be negative. Function
589 * netmap_idx_n2k() handles wraparounds properly.
590 */
602 if (slot) {
591 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
603 int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
604 netmap_load_map(na, txr->txtag,
605 txbuf->map, NMB(na, slot + si));
606 }
607#endif /* DEV_NETMAP */
592 int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
593 netmap_load_map(na, txr->txtag,
594 txbuf->map, NMB(na, slot + si));
595 }
596#endif /* DEV_NETMAP */
597
608 /* Clear the EOP descriptor pointer */
609 txbuf->eop = NULL;
598 /* Clear the EOP descriptor pointer */
599 txbuf->eop = NULL;
610 }
600 }
611
601
612#ifdef IXGBE_FDIR
613 /* Set the rate at which we sample packets */
602 /* Set the rate at which we sample packets */
614 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
603 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
615 txr->atr_sample = atr_sample_rate;
604 txr->atr_sample = atr_sample_rate;
616#endif
617
618 /* Set number of descriptors available */
619 txr->tx_avail = adapter->num_tx_desc;
620
621 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
622 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
623 IXGBE_TX_UNLOCK(txr);
605
606 /* Set number of descriptors available */
607 txr->tx_avail = adapter->num_tx_desc;
608
609 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
610 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
611 IXGBE_TX_UNLOCK(txr);
624}
612} /* ixgbe_setup_transmit_ring */
625
613
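The netmap comment above notes that slots in the netmap ring sit kring->nkr_hwofs positions "ahead" of the corresponding NIC slots and that netmap_idx_n2k() handles wraparound, including negative offsets. A standalone sketch of that kind of index translation follows; it is not netmap's actual implementation.

#include <stdio.h>

static int
idx_with_offset(int i, int hwofs, int num_slots)
{
        int si = (i + hwofs) % num_slots;

        if (si < 0)             /* C's % keeps the sign of the dividend */
                si += num_slots;
        return (si);
}

int
main(void)
{
        printf("%d\n", idx_with_offset(3, 2, 8));   /* 5 */
        printf("%d\n", idx_with_offset(7, 2, 8));   /* wraps to 1 */
        printf("%d\n", idx_with_offset(1, -3, 8));  /* negative offset -> 6 */
        return (0);
}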
626/*********************************************************************
627 *
628 * Initialize all transmit rings.
629 *
630 **********************************************************************/
614/************************************************************************
615 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
616 ************************************************************************/
631int
632ixgbe_setup_transmit_structures(struct adapter *adapter)
633{
634 struct tx_ring *txr = adapter->tx_rings;
635
636 for (int i = 0; i < adapter->num_queues; i++, txr++)
637 ixgbe_setup_transmit_ring(txr);
638
639 return (0);
617int
618ixgbe_setup_transmit_structures(struct adapter *adapter)
619{
620 struct tx_ring *txr = adapter->tx_rings;
621
622 for (int i = 0; i < adapter->num_queues; i++, txr++)
623 ixgbe_setup_transmit_ring(txr);
624
625 return (0);
640}
626} /* ixgbe_setup_transmit_structures */
641
627
642/*********************************************************************
643 *
644 * Free all transmit rings.
645 *
646 **********************************************************************/
628/************************************************************************
629 * ixgbe_free_transmit_structures - Free all transmit rings.
630 ************************************************************************/
647void
648ixgbe_free_transmit_structures(struct adapter *adapter)
649{
650 struct tx_ring *txr = adapter->tx_rings;
651
652 for (int i = 0; i < adapter->num_queues; i++, txr++) {
653 IXGBE_TX_LOCK(txr);
654 ixgbe_free_transmit_buffers(txr);
655 ixgbe_dma_free(adapter, &txr->txdma);
656 IXGBE_TX_UNLOCK(txr);
657 IXGBE_TX_LOCK_DESTROY(txr);
658 }
659 free(adapter->tx_rings, M_DEVBUF);
631void
632ixgbe_free_transmit_structures(struct adapter *adapter)
633{
634 struct tx_ring *txr = adapter->tx_rings;
635
636 for (int i = 0; i < adapter->num_queues; i++, txr++) {
637 IXGBE_TX_LOCK(txr);
638 ixgbe_free_transmit_buffers(txr);
639 ixgbe_dma_free(adapter, &txr->txdma);
640 IXGBE_TX_UNLOCK(txr);
641 IXGBE_TX_LOCK_DESTROY(txr);
642 }
643 free(adapter->tx_rings, M_DEVBUF);
660}
644} /* ixgbe_free_transmit_structures */
661
645
662/*********************************************************************
646/************************************************************************
647 * ixgbe_free_transmit_buffers
663 *
648 *
664 * Free transmit ring related data structures.
665 *
666 **********************************************************************/
649 * Free transmit ring related data structures.
650 ************************************************************************/
667static void
668ixgbe_free_transmit_buffers(struct tx_ring *txr)
669{
651static void
652ixgbe_free_transmit_buffers(struct tx_ring *txr)
653{
670 struct adapter *adapter = txr->adapter;
654 struct adapter *adapter = txr->adapter;
671 struct ixgbe_tx_buf *tx_buffer;
655 struct ixgbe_tx_buf *tx_buffer;
672 int i;
656 int i;
673
674 INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
675
676 if (txr->tx_buffers == NULL)
677 return;
678
679 tx_buffer = txr->tx_buffers;
680 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
681 if (tx_buffer->m_head != NULL) {
682 bus_dmamap_sync(txr->txtag, tx_buffer->map,
683 BUS_DMASYNC_POSTWRITE);
657
658 INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
659
660 if (txr->tx_buffers == NULL)
661 return;
662
663 tx_buffer = txr->tx_buffers;
664 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
665 if (tx_buffer->m_head != NULL) {
666 bus_dmamap_sync(txr->txtag, tx_buffer->map,
667 BUS_DMASYNC_POSTWRITE);
684 bus_dmamap_unload(txr->txtag,
685 tx_buffer->map);
668 bus_dmamap_unload(txr->txtag, tx_buffer->map);
686 m_freem(tx_buffer->m_head);
687 tx_buffer->m_head = NULL;
688 if (tx_buffer->map != NULL) {
669 m_freem(tx_buffer->m_head);
670 tx_buffer->m_head = NULL;
671 if (tx_buffer->map != NULL) {
689 bus_dmamap_destroy(txr->txtag,
690 tx_buffer->map);
672 bus_dmamap_destroy(txr->txtag, tx_buffer->map);
691 tx_buffer->map = NULL;
692 }
693 } else if (tx_buffer->map != NULL) {
673 tx_buffer->map = NULL;
674 }
675 } else if (tx_buffer->map != NULL) {
694 bus_dmamap_unload(txr->txtag,
695 tx_buffer->map);
696 bus_dmamap_destroy(txr->txtag,
697 tx_buffer->map);
676 bus_dmamap_unload(txr->txtag, tx_buffer->map);
677 bus_dmamap_destroy(txr->txtag, tx_buffer->map);
698 tx_buffer->map = NULL;
699 }
700 }
678 tx_buffer->map = NULL;
679 }
680 }
701#ifdef IXGBE_LEGACY_TX
702 if (txr->br != NULL)
703 buf_ring_free(txr->br, M_DEVBUF);
681 if (txr->br != NULL)
682 buf_ring_free(txr->br, M_DEVBUF);
704#endif
705 if (txr->tx_buffers != NULL) {
706 free(txr->tx_buffers, M_DEVBUF);
707 txr->tx_buffers = NULL;
708 }
709 if (txr->txtag != NULL) {
710 bus_dma_tag_destroy(txr->txtag);
711 txr->txtag = NULL;
712 }
683 if (txr->tx_buffers != NULL) {
684 free(txr->tx_buffers, M_DEVBUF);
685 txr->tx_buffers = NULL;
686 }
687 if (txr->txtag != NULL) {
688 bus_dma_tag_destroy(txr->txtag);
689 txr->txtag = NULL;
690 }
713 return;
714}
691} /* ixgbe_free_transmit_buffers */
715
692
716/*********************************************************************
693/************************************************************************
694 * ixgbe_tx_ctx_setup
717 *
695 *
718 * Advanced Context Descriptor setup for VLAN, CSUM or TSO
719 *
720 **********************************************************************/
721
696 * Advanced Context Descriptor setup for VLAN, CSUM or TSO
697 ************************************************************************/
722static int
723ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
724 u32 *cmd_type_len, u32 *olinfo_status)
725{
698static int
699ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
700 u32 *cmd_type_len, u32 *olinfo_status)
701{
726 struct adapter *adapter = txr->adapter;
727 struct ixgbe_adv_tx_context_desc *TXD;
702 struct ixgbe_adv_tx_context_desc *TXD;
728 struct ether_vlan_header *eh;
703 struct ether_vlan_header *eh;
729#ifdef INET
704#ifdef INET
730 struct ip *ip;
705 struct ip *ip;
731#endif
732#ifdef INET6
706#endif
707#ifdef INET6
733 struct ip6_hdr *ip6;
708 struct ip6_hdr *ip6;
734#endif
709#endif
735 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
736 int ehdrlen, ip_hlen = 0;
737 u16 etype;
738 u8 ipproto = 0;
739 int offload = TRUE;
740 int ctxd = txr->next_avail_desc;
741 u16 vtag = 0;
742 caddr_t l3d;
710 int ehdrlen, ip_hlen = 0;
711 int offload = TRUE;
712 int ctxd = txr->next_avail_desc;
713 u32 vlan_macip_lens = 0;
714 u32 type_tucmd_mlhl = 0;
715 u16 vtag = 0;
716 u16 etype;
717 u8 ipproto = 0;
718 caddr_t l3d;
743
744
745 /* First check if TSO is to be used */
719
720
721 /* First check if TSO is to be used */
746 if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO|CSUM_IP6_TSO))
722 if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO))
747 return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
748
749 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
750 offload = FALSE;
751
752 /* Indicate the whole packet as payload when not doing TSO */
723 return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
724
725 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
726 offload = FALSE;
727
728 /* Indicate the whole packet as payload when not doing TSO */
753 *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
729 *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
754
755 /* Now ready a context descriptor */
730
731 /* Now ready a context descriptor */
756 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
732 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
757
758 /*
733
734 /*
759 ** In advanced descriptors the vlan tag must
760 ** be placed into the context descriptor. Hence
761 ** we need to make one even if not doing offloads.
762 */
735 * In advanced descriptors the vlan tag must
736 * be placed into the context descriptor. Hence
737 * we need to make one even if not doing offloads.
738 */
763 if (mp->m_flags & M_VLANTAG) {
764 vtag = htole16(mp->m_pkthdr.ether_vtag);
765 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
739 if (mp->m_flags & M_VLANTAG) {
740 vtag = htole16(mp->m_pkthdr.ether_vtag);
741 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
766 } else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
742 } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
743 (offload == FALSE))
767 return (0);
768
769 /*
770 * Determine where frame payload starts.
771 * Jump over vlan headers if already present,
772 * helpful for QinQ too.
773 */
774 eh = mtod(mp, struct ether_vlan_header *);

--- 7 unchanged lines hidden (view full) ---

782
783 /* Set the ether header length */
784 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
785
786 if (offload == FALSE)
787 goto no_offloads;
788
789 /*
744 return (0);
745
746 /*
747 * Determine where frame payload starts.
748 * Jump over vlan headers if already present,
749 * helpful for QinQ too.
750 */
751 eh = mtod(mp, struct ether_vlan_header *);

--- 7 unchanged lines hidden (view full) ---

759
760 /* Set the ether header length */
761 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
762
763 if (offload == FALSE)
764 goto no_offloads;
765
766 /*
790 * If the first mbuf only includes the ethernet header, jump to the next one
791 * XXX: This assumes the stack splits mbufs containing headers on header boundaries
767 * If the first mbuf only includes the ethernet header,
768 * jump to the next one
769 * XXX: This assumes the stack splits mbufs containing headers
770 * on header boundaries
792 * XXX: And assumes the entire IP header is contained in one mbuf
793 */
794 if (mp->m_len == ehdrlen && mp->m_next)
795 l3d = mtod(mp->m_next, caddr_t);
796 else
797 l3d = mtod(mp, caddr_t) + ehdrlen;
798
799 switch (etype) {

--- 23 unchanged lines hidden ---

823 break;
824 }
825
826 vlan_macip_lens |= ip_hlen;
827
828 /* No support for offloads for non-L4 next headers */
829 switch (ipproto) {
830 case IPPROTO_TCP:
771 * XXX: And assumes the entire IP header is contained in one mbuf
772 */
773 if (mp->m_len == ehdrlen && mp->m_next)
774 l3d = mtod(mp->m_next, caddr_t);
775 else
776 l3d = mtod(mp, caddr_t) + ehdrlen;
777
778 switch (etype) {

--- 23 unchanged lines hidden ---

802 break;
803 }
804
805 vlan_macip_lens |= ip_hlen;
806
807 /* No support for offloads for non-L4 next headers */
808 switch (ipproto) {
809 case IPPROTO_TCP:
831 if (mp->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
810 if (mp->m_pkthdr.csum_flags &
811 (CSUM_IP_TCP | CSUM_IP6_TCP))
832 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
833 else
834 offload = false;
835 break;
836 case IPPROTO_UDP:
812 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
813 else
814 offload = false;
815 break;
816 case IPPROTO_UDP:
837 if (mp->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
817 if (mp->m_pkthdr.csum_flags &
818 (CSUM_IP_UDP | CSUM_IP6_UDP))
838 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
839 else
840 offload = false;
841 break;
842 case IPPROTO_SCTP:
819 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
820 else
821 offload = false;
822 break;
823 case IPPROTO_SCTP:
843 if (mp->m_pkthdr.csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
824 if (mp->m_pkthdr.csum_flags &
825 (CSUM_IP_SCTP | CSUM_IP6_SCTP))
844 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
845 else
846 offload = false;
847 break;
848 default:
849 offload = false;
850 break;
851 }

--- 11 unchanged lines hidden (view full) ---

863 TXD->mss_l4len_idx = htole32(0);
864
865 /* We've consumed the first desc, adjust counters */
866 if (++ctxd == txr->num_desc)
867 ctxd = 0;
868 txr->next_avail_desc = ctxd;
869 --txr->tx_avail;
870
826 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
827 else
828 offload = false;
829 break;
830 default:
831 offload = false;
832 break;
833 }

--- 11 unchanged lines hidden (view full) ---

845 TXD->mss_l4len_idx = htole32(0);
846
847 /* We've consumed the first desc, adjust counters */
848 if (++ctxd == txr->num_desc)
849 ctxd = 0;
850 txr->next_avail_desc = ctxd;
851 --txr->tx_avail;
852
871 return (0);
872}
853 return (0);
854} /* ixgbe_tx_ctx_setup */
873
855
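The context-descriptor setup above packs the VLAN tag, MAC header length and IP header length into a single 32-bit vlan_macip_lens word with shifts. A standalone sketch of that packing follows; the shift values are assumptions for illustration, mirroring the usual advanced-descriptor layout (IPLEN in the low bits, MACLEN above it, the VLAN tag in the top half) rather than quoting ixgbe_type.h.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MACLEN_SHIFT 9          /* assumed, for illustration only */
#define EXAMPLE_VLAN_SHIFT   16         /* assumed, for illustration only */

int
main(void)
{
        uint32_t vlan_macip_lens = 0;
        uint16_t vtag = 100;            /* VLAN id from the mbuf, if tagged */
        int ehdrlen = 18;               /* Ethernet + 802.1Q encapsulation */
        int ip_hlen = 20;               /* IPv4 header with no options */

        vlan_macip_lens |= (uint32_t)vtag << EXAMPLE_VLAN_SHIFT;
        vlan_macip_lens |= (uint32_t)ehdrlen << EXAMPLE_MACLEN_SHIFT;
        vlan_macip_lens |= (uint32_t)ip_hlen;

        printf("vlan_macip_lens = 0x%08x\n", (unsigned)vlan_macip_lens);
        return (0);
}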
874/**********************************************************************
856/************************************************************************
857 * ixgbe_tso_setup
875 *
858 *
876 * Setup work for hardware segmentation offload (TSO) on
877 * adapters using advanced tx descriptors
878 *
879 **********************************************************************/
859 * Setup work for hardware segmentation offload (TSO) on
860 * adapters using advanced tx descriptors
861 ************************************************************************/
880static int
862static int
881ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
882 u32 *cmd_type_len, u32 *olinfo_status)
863ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
864 u32 *olinfo_status)
883{
884 struct ixgbe_adv_tx_context_desc *TXD;
865{
866 struct ixgbe_adv_tx_context_desc *TXD;
885 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
886 u32 mss_l4len_idx = 0, paylen;
887 u16 vtag = 0, eh_type;
888 int ctxd, ehdrlen, ip_hlen, tcp_hlen;
889 struct ether_vlan_header *eh;
867 struct ether_vlan_header *eh;
890#ifdef INET6
868#ifdef INET6
891 struct ip6_hdr *ip6;
869 struct ip6_hdr *ip6;
892#endif
893#ifdef INET
870#endif
871#ifdef INET
894 struct ip *ip;
872 struct ip *ip;
895#endif
873#endif
896 struct tcphdr *th;
874 struct tcphdr *th;
875 int ctxd, ehdrlen, ip_hlen, tcp_hlen;
876 u32 vlan_macip_lens = 0;
877 u32 type_tucmd_mlhl = 0;
878 u32 mss_l4len_idx = 0, paylen;
879 u16 vtag = 0, eh_type;
897
898 /*
899 * Determine where frame payload starts.
900 * Jump over vlan headers if already present
901 */
902 eh = mtod(mp, struct ether_vlan_header *);
903 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
904 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
905 eh_type = eh->evl_proto;
906 } else {
907 ehdrlen = ETHER_HDR_LEN;
908 eh_type = eh->evl_encap_proto;
909 }
910
911 switch (ntohs(eh_type)) {
880
881 /*
882 * Determine where frame payload starts.
883 * Jump over vlan headers if already present
884 */
885 eh = mtod(mp, struct ether_vlan_header *);
886 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
887 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
888 eh_type = eh->evl_proto;
889 } else {
890 ehdrlen = ETHER_HDR_LEN;
891 eh_type = eh->evl_encap_proto;
892 }
893
894 switch (ntohs(eh_type)) {
912#ifdef INET6
913 case ETHERTYPE_IPV6:
914 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
915 /* XXX-BZ For now we do not pretend to support ext. hdrs. */
916 if (ip6->ip6_nxt != IPPROTO_TCP)
917 return (ENXIO);
918 ip_hlen = sizeof(struct ip6_hdr);
919 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
920 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
921 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
922 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
923 break;
924#endif
925#ifdef INET
926 case ETHERTYPE_IP:
927 ip = (struct ip *)(mp->m_data + ehdrlen);
928 if (ip->ip_p != IPPROTO_TCP)
929 return (ENXIO);
930 ip->ip_sum = 0;
931 ip_hlen = ip->ip_hl << 2;
932 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
933 th->th_sum = in_pseudo(ip->ip_src.s_addr,
934 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
935 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
936 /* Tell transmit desc to also do IPv4 checksum. */
937 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
938 break;
939#endif
895#ifdef INET
896 case ETHERTYPE_IP:
897 ip = (struct ip *)(mp->m_data + ehdrlen);
898 if (ip->ip_p != IPPROTO_TCP)
899 return (ENXIO);
900 ip->ip_sum = 0;
901 ip_hlen = ip->ip_hl << 2;
902 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
903 th->th_sum = in_pseudo(ip->ip_src.s_addr,
904 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
905 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
906 /* Tell transmit desc to also do IPv4 checksum. */
907 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
908 break;
909#endif
910#ifdef INET6
911 case ETHERTYPE_IPV6:
912 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
913 /* XXX-BZ For now we do not pretend to support ext. hdrs. */
914 if (ip6->ip6_nxt != IPPROTO_TCP)
915 return (ENXIO);
916 ip_hlen = sizeof(struct ip6_hdr);
917 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
918 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
919 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
920 break;
921#endif
940 default:
941 panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
942 __func__, ntohs(eh_type));
943 break;
944 }
945
946 ctxd = txr->next_avail_desc;
922 default:
923 panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
924 __func__, ntohs(eh_type));
925 break;
926 }
927
928 ctxd = txr->next_avail_desc;
947 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
929 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
948
949 tcp_hlen = th->th_off << 2;
950
951 /* This is used in the transmit desc in encap */
952 paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
953
954 /* VLAN MACLEN IPLEN */
955 if (mp->m_flags & M_VLANTAG) {
956 vtag = htole16(mp->m_pkthdr.ether_vtag);
930
931 tcp_hlen = th->th_off << 2;
932
933 /* This is used in the transmit desc in encap */
934 paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
935
936 /* VLAN MACLEN IPLEN */
937 if (mp->m_flags & M_VLANTAG) {
938 vtag = htole16(mp->m_pkthdr.ether_vtag);
957 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
939 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
958 }
959
960 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
961 vlan_macip_lens |= ip_hlen;
962 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
963
964 /* ADV DTYPE TUCMD */
965 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

--- 11 unchanged lines hidden ---

977 ctxd = 0;
978
979 txr->tx_avail--;
980 txr->next_avail_desc = ctxd;
981 *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
982 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
983 *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
984 ++txr->tso_tx;
940 }
941
942 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
943 vlan_macip_lens |= ip_hlen;
944 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
945
946 /* ADV DTYPE TUCMD */
947 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

--- 11 unchanged lines hidden ---

959 ctxd = 0;
960
961 txr->tx_avail--;
962 txr->next_avail_desc = ctxd;
963 *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
964 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
965 *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
966 ++txr->tso_tx;
967
985 return (0);
968 return (0);
986}
969} /* ixgbe_tso_setup */
987
988
970
971
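ixgbe_tso_setup() above reports only the TCP payload length to the hardware (total frame length minus the Ethernet, IP and TCP headers), from which the NIC emits MSS-sized segments. A small standalone example of that arithmetic, with illustrative header sizes:

#include <stdio.h>

int
main(void)
{
        int pkthdr_len = 65535;         /* total length handed down by the stack */
        int ehdrlen = 14;               /* untagged Ethernet header */
        int ip_hlen = 20;               /* IPv4, no options */
        int tcp_hlen = 32;              /* TCP header with timestamps */
        int mss = 1448;

        int paylen = pkthdr_len - ehdrlen - ip_hlen - tcp_hlen;
        int segs = (paylen + mss - 1) / mss;    /* segments the NIC will emit */

        printf("paylen %d bytes -> about %d TSO segments\n", paylen, segs);
        return (0);
}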
989/**********************************************************************
972/************************************************************************
973 * ixgbe_txeof
990 *
974 *
991 * Examine each tx_buffer in the used queue. If the hardware is done
992 * processing the packet then free associated resources. The
993 * tx_buffer is put back on the free queue.
994 *
995 **********************************************************************/
975 * Examine each tx_buffer in the used queue. If the hardware is done
976 * processing the packet then free associated resources. The
977 * tx_buffer is put back on the free queue.
978 ************************************************************************/
996void
997ixgbe_txeof(struct tx_ring *txr)
998{
979void
980ixgbe_txeof(struct tx_ring *txr)
981{
999 struct adapter *adapter = txr->adapter;
1000#ifdef DEV_NETMAP
1001 struct ifnet *ifp = adapter->ifp;
1002#endif
1003 u32 work, processed = 0;
1004 u32 limit = adapter->tx_process_limit;
1005 struct ixgbe_tx_buf *buf;
982 struct adapter *adapter = txr->adapter;
983 struct ixgbe_tx_buf *buf;
1006 union ixgbe_adv_tx_desc *txd;
984 union ixgbe_adv_tx_desc *txd;
985 u32 work, processed = 0;
986 u32 limit = adapter->tx_process_limit;
1007
1008 mtx_assert(&txr->tx_mtx, MA_OWNED);
1009
1010#ifdef DEV_NETMAP
987
988 mtx_assert(&txr->tx_mtx, MA_OWNED);
989
990#ifdef DEV_NETMAP
1011 if (ifp->if_capenable & IFCAP_NETMAP) {
1012 struct netmap_adapter *na = NA(ifp);
991 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
992 (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
993 struct netmap_adapter *na = NA(adapter->ifp);
1013 struct netmap_kring *kring = &na->tx_rings[txr->me];
1014 txd = txr->tx_base;
1015 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1016 BUS_DMASYNC_POSTREAD);
1017 /*
1018 * In netmap mode, all the work is done in the context
1019 * of the client thread. Interrupt handlers only wake up
1020 * clients, which may be sleeping on individual rings

--- 4 unchanged lines hidden ---

1025 * - ixgbe_txsync() sets kring->nr_kflags with the index of
1026 * the slot that should wake up the thread (nkr_num_slots
1027 * means the user thread should not be woken up);
1028 * - the driver ignores tx interrupts unless netmap_mitigate=0
1029 * or the slot has the DD bit set.
1030 */
1031 if (!netmap_mitigate ||
1032 (kring->nr_kflags < kring->nkr_num_slots &&
994 struct netmap_kring *kring = &na->tx_rings[txr->me];
995 txd = txr->tx_base;
996 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
997 BUS_DMASYNC_POSTREAD);
998 /*
999 * In netmap mode, all the work is done in the context
1000 * of the client thread. Interrupt handlers only wake up
1001 * clients, which may be sleeping on individual rings

--- 4 unchanged lines hidden ---

1006 * - ixgbe_txsync() sets kring->nr_kflags with the index of
1007 * the slot that should wake up the thread (nkr_num_slots
1008 * means the user thread should not be woken up);
1009 * - the driver ignores tx interrupts unless netmap_mitigate=0
1010 * or the slot has the DD bit set.
1011 */
1012 if (!netmap_mitigate ||
1013 (kring->nr_kflags < kring->nkr_num_slots &&
1033 txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
1034 netmap_tx_irq(ifp, txr->me);
1014 txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
1015 netmap_tx_irq(adapter->ifp, txr->me);
1035 }
1036 return;
1037 }
1038#endif /* DEV_NETMAP */
1039
1040 if (txr->tx_avail == txr->num_desc) {
1041 txr->busy = 0;
1042 return;
1043 }
1044
1045 /* Get work starting point */
1046 work = txr->next_to_clean;
1047 buf = &txr->tx_buffers[work];
1048 txd = &txr->tx_base[work];
1049 work -= txr->num_desc; /* The distance to ring end */
1016 }
1017 return;
1018 }
1019#endif /* DEV_NETMAP */
1020
1021 if (txr->tx_avail == txr->num_desc) {
1022 txr->busy = 0;
1023 return;
1024 }
1025
1026 /* Get work starting point */
1027 work = txr->next_to_clean;
1028 buf = &txr->tx_buffers[work];
1029 txd = &txr->tx_base[work];
1030 work -= txr->num_desc; /* The distance to ring end */
1050 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1051 BUS_DMASYNC_POSTREAD);
1031 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1032 BUS_DMASYNC_POSTREAD);
1052
1053 do {
1054 union ixgbe_adv_tx_desc *eop = buf->eop;
1055 if (eop == NULL) /* No work */
1056 break;
1057
1058 if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
1059 break; /* I/O not complete */
1060
1061 if (buf->m_head) {
1033
1034 do {
1035 union ixgbe_adv_tx_desc *eop = buf->eop;
1036 if (eop == NULL) /* No work */
1037 break;
1038
1039 if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
1040 break; /* I/O not complete */
1041
1042 if (buf->m_head) {
1062 txr->bytes +=
1063 buf->m_head->m_pkthdr.len;
1064 bus_dmamap_sync(txr->txtag,
1065 buf->map,
1043 txr->bytes += buf->m_head->m_pkthdr.len;
1044 bus_dmamap_sync(txr->txtag, buf->map,
1066 BUS_DMASYNC_POSTWRITE);
1045 BUS_DMASYNC_POSTWRITE);
1067 bus_dmamap_unload(txr->txtag,
1068 buf->map);
1046 bus_dmamap_unload(txr->txtag, buf->map);
1069 m_freem(buf->m_head);
1070 buf->m_head = NULL;
1071 }
1072 buf->eop = NULL;
1073 ++txr->tx_avail;
1074
1075 /* We clean the range if multi segment */
1076 while (txd != eop) {
1077 ++txd;
1078 ++buf;
1079 ++work;
1080 /* wrap the ring? */
1081 if (__predict_false(!work)) {
1082 work -= txr->num_desc;
1083 buf = txr->tx_buffers;
1084 txd = txr->tx_base;
1085 }
1086 if (buf->m_head) {
1047 m_freem(buf->m_head);
1048 buf->m_head = NULL;
1049 }
1050 buf->eop = NULL;
1051 ++txr->tx_avail;
1052
1053 /* We clean the range if multi segment */
1054 while (txd != eop) {
1055 ++txd;
1056 ++buf;
1057 ++work;
1058 /* wrap the ring? */
1059 if (__predict_false(!work)) {
1060 work -= txr->num_desc;
1061 buf = txr->tx_buffers;
1062 txd = txr->tx_base;
1063 }
1064 if (buf->m_head) {
1087 txr->bytes +=
1088 buf->m_head->m_pkthdr.len;
1089 bus_dmamap_sync(txr->txtag,
1090 buf->map,
1065 txr->bytes += buf->m_head->m_pkthdr.len;
1066 bus_dmamap_sync(txr->txtag, buf->map,
1091 BUS_DMASYNC_POSTWRITE);
1067 BUS_DMASYNC_POSTWRITE);
1092 bus_dmamap_unload(txr->txtag,
1093 buf->map);
1068 bus_dmamap_unload(txr->txtag, buf->map);
1094 m_freem(buf->m_head);
1095 buf->m_head = NULL;
1096 }
1097 ++txr->tx_avail;
1098 buf->eop = NULL;
1099
1100 }
1101 ++txr->packets;

--- 14 unchanged lines hidden ---

1116
1117 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1118 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1119
1120 work += txr->num_desc;
1121 txr->next_to_clean = work;
1122
1123 /*
1069 m_freem(buf->m_head);
1070 buf->m_head = NULL;
1071 }
1072 ++txr->tx_avail;
1073 buf->eop = NULL;
1074
1075 }
1076 ++txr->packets;

--- 14 unchanged lines hidden ---

1091
1092 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1093 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1094
1095 work += txr->num_desc;
1096 txr->next_to_clean = work;
1097
1098 /*
1124 ** Queue Hang detection: we know there's
1125 ** work outstanding or the first return
1126 ** would have been taken, so increment busy
1127 ** if nothing managed to get cleaned; then
1128 ** in local_timer it will be checked and
1129 ** marked as HUNG if it exceeds the MAX attempts.
1130 */
1099 * Queue Hang detection: we know there's
1100 * work outstanding or the first return
1101 * would have been taken, so increment busy
1102 * if nothing managed to get cleaned; then
1103 * in local_timer it will be checked and
1104 * marked as HUNG if it exceeds the MAX attempts.
1105 */
1131 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
1132 ++txr->busy;
1133 /*
1106 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
1107 ++txr->busy;
1108 /*
1134 ** If anything gets cleaned we reset state to 1,
1135 ** note this will turn off HUNG if it's set.
1136 */
1109 * If anything gets cleaned we reset state to 1,
1110 * note this will turn off HUNG if it's set.
1111 */
1137 if (processed)
1138 txr->busy = 1;
1139
1140 if (txr->tx_avail == txr->num_desc)
1141 txr->busy = 0;
1142
1143 return;
1112 if (processed)
1113 txr->busy = 1;
1114
1115 if (txr->tx_avail == txr->num_desc)
1116 txr->busy = 0;
1117
1118 return;
1144}
1119} /* ixgbe_txeof */
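/*
 * Editor's illustrative sketch (not part of the driver source): the unsigned
 * "distance to ring end" trick used by ixgbe_txeof() above.  The index is
 * biased by subtracting the ring size, so an increment that lands on 0 means
 * the walk just stepped past the last descriptor and must wrap.  Ring size
 * and starting index below are arbitrary example values.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint32_t num_desc = 8;	/* example ring size */
	uint32_t work = 5;		/* example next_to_clean */
	uint32_t index = work;		/* the "real" ring index, for printing */

	work -= num_desc;		/* biased: wraps around in a u32 */

	for (int step = 0; step < 6; step++) {
		printf("cleaning descriptor %u\n", (unsigned)index);
		++work;
		++index;
		if (work == 0) {	/* hit the end of the ring: wrap */
			work -= num_desc;
			index = 0;
		}
	}

	work += num_desc;		/* un-bias to recover next_to_clean */
	printf("next_to_clean = %u\n", (unsigned)work);
	return (0);
}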
1145
1120
1146
1147#ifdef IXGBE_FDIR
1148/*
1149** This routine parses packet headers so that Flow
1150** Director can make a hashed filter table entry
1151** allowing traffic flows to be identified and kept
1152 ** on the same CPU. Doing this for every packet
1153 ** would be a performance hit, so we only do it
1154 ** for one in every IXGBE_FDIR_RATE packets.
1155*/
1156static void
1157ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
1158{
1159 struct adapter *adapter = txr->adapter;
1160 struct ix_queue *que;
1161 struct ip *ip;
1162 struct tcphdr *th;
1163 struct udphdr *uh;
1164 struct ether_vlan_header *eh;
1165 union ixgbe_atr_hash_dword input = {.dword = 0};
1166 union ixgbe_atr_hash_dword common = {.dword = 0};
1167 int ehdrlen, ip_hlen;
1168 u16 etype;
1169
1170 eh = mtod(mp, struct ether_vlan_header *);
1171 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1172 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1173 etype = eh->evl_proto;
1174 } else {
1175 ehdrlen = ETHER_HDR_LEN;
1176 etype = eh->evl_encap_proto;
1177 }
1178
1179 /* Only handling IPv4 */
1180 if (etype != htons(ETHERTYPE_IP))
1181 return;
1182
1183 ip = (struct ip *)(mp->m_data + ehdrlen);
1184 ip_hlen = ip->ip_hl << 2;
1185
1186 /* check if we're UDP or TCP */
1187 switch (ip->ip_p) {
1188 case IPPROTO_TCP:
1189 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1190 /* src and dst are inverted */
1191 common.port.dst ^= th->th_sport;
1192 common.port.src ^= th->th_dport;
1193 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
1194 break;
1195 case IPPROTO_UDP:
1196 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
1197 /* src and dst are inverted */
1198 common.port.dst ^= uh->uh_sport;
1199 common.port.src ^= uh->uh_dport;
1200 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
1201 break;
1202 default:
1203 return;
1204 }
1205
1206 input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
1207 if (mp->m_pkthdr.ether_vtag)
1208 common.flex_bytes ^= htons(ETHERTYPE_VLAN);
1209 else
1210 common.flex_bytes ^= etype;
1211 common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
1212
1213 que = &adapter->queues[txr->me];
1214 /*
1215 ** This assumes the Rx queue and Tx
1216 ** queue are bound to the same CPU
1217 */
1218 ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
1219 input, common, que->msix);
1220}
1221#endif /* IXGBE_FDIR */
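/*
 * Editor's illustrative sketch (not part of the driver source): how
 * ixgbe_atr() above folds a flow's addresses and ports into the dwords
 * handed to the Flow Director signature filter.  The structure and the
 * flow-type constant here are simplified stand-ins for the driver's
 * ixgbe_atr_hash_dword unions; values are kept in host order for clarity.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_atr_ports { uint16_t src, dst; };

int
main(void)
{
	/* Example TCP/IPv4 flow. */
	uint32_t src_ip = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t dst_ip = 0xc0a80002;	/* 192.168.0.2 */
	uint16_t sport = 49152, dport = 80;
	uint8_t flow_type_tcpv4 = 1;	/* assumed stand-in for the TCPV4 type */

	struct sketch_atr_ports common_port = { 0, 0 };
	uint32_t common_ip;
	uint8_t flow_type = 0;

	/* Note the deliberate swap, as in the driver: dst ^= sport, src ^= dport */
	common_port.dst ^= sport;
	common_port.src ^= dport;
	flow_type ^= flow_type_tcpv4;
	common_ip = src_ip ^ dst_ip;

	printf("flow_type=%u ports={src=%u,dst=%u} ip_fold=0x%08x\n",
	    (unsigned)flow_type, (unsigned)common_port.src,
	    (unsigned)common_port.dst, (unsigned)common_ip);
	return (0);
}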
1222
1223/*
1224** Used to detect a descriptor that has
1225** been merged by Hardware RSC.
1226*/
1121/************************************************************************
1122 * ixgbe_rsc_count
1123 *
1124 * Used to detect a descriptor that has been merged by Hardware RSC.
1125 ************************************************************************/
1227static inline u32
1228ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1229{
1230 return (le32toh(rx->wb.lower.lo_dword.data) &
1231 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1126static inline u32
1127ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1128{
1129 return (le32toh(rx->wb.lower.lo_dword.data) &
1130 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1232}
1131} /* ixgbe_rsc_count */
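/*
 * Editor's illustrative sketch (not part of the driver source): extracting
 * the RSC coalesce count from a receive descriptor's lower status dword, as
 * ixgbe_rsc_count() does above.  The mask and shift are assumptions standing
 * in for IXGBE_RXDADV_RSCCNT_MASK / IXGBE_RXDADV_RSCCNT_SHIFT, and the dword
 * is assumed already converted to host order (the driver uses le32toh()).
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RSCCNT_MASK  0x001E0000u	/* assumed: 4-bit count field */
#define SKETCH_RSCCNT_SHIFT 17		/* assumed */

static inline uint32_t
sketch_rsc_count(uint32_t lo_dword)
{
	return ((lo_dword & SKETCH_RSCCNT_MASK) >> SKETCH_RSCCNT_SHIFT);
}

int
main(void)
{
	uint32_t lo_dword = 3u << SKETCH_RSCCNT_SHIFT;	/* 3 merged descriptors */

	printf("rsc count = %u\n", (unsigned)sketch_rsc_count(lo_dword));
	return (0);
}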
1233
1132
1234/*********************************************************************
1133/************************************************************************
1134 * ixgbe_setup_hw_rsc
1235 *
1135 *
1236 * Initialize Hardware RSC (LRO) feature on 82599
1237 * for an RX ring; this is toggled by the LRO capability
1238 * even though it is transparent to the stack.
1136 * Initialize Hardware RSC (LRO) feature on 82599
1137 * for an RX ring, this is toggled by the LRO capability
1138 * even though it is transparent to the stack.
1239 *
1139 *
1240 * NOTE: since this HW feature only works with IPV4 and
1241 * our testing has shown soft LRO to be as effective
1242 * I have decided to disable this by default.
1243 *
1244 **********************************************************************/
1140 * NOTE: Since this HW feature only works with IPv4 and
1141 * testing has shown soft LRO to be as effective,
1142 * this feature will be disabled by default.
1143 ************************************************************************/
1245static void
1246ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1247{
1144static void
1145ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1146{
1248 struct adapter *adapter = rxr->adapter;
1249 struct ixgbe_hw *hw = &adapter->hw;
1250 u32 rscctrl, rdrxctl;
1147 struct adapter *adapter = rxr->adapter;
1148 struct ixgbe_hw *hw = &adapter->hw;
1149 u32 rscctrl, rdrxctl;
1251
1252 /* If turning LRO/RSC off we need to disable it */
1253 if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
1254 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1255 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
1256 return;
1257 }
1258
1259 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1260 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1150
1151 /* If turning LRO/RSC off we need to disable it */
1152 if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
1153 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1154 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
1155 return;
1156 }
1157
1158 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1159 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1261#ifdef DEV_NETMAP /* crcstrip is optional in netmap */
1262 if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
1160#ifdef DEV_NETMAP
1161 /* Always strip CRC unless Netmap disabled it */
1162 if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
1163 !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
1164 ix_crcstrip)
1263#endif /* DEV_NETMAP */
1165#endif /* DEV_NETMAP */
1264 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1166 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1265 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
1266 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1267
1268 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1269 rscctrl |= IXGBE_RSCCTL_RSCEN;
1270 /*
1167 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
1168 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1169
1170 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1171 rscctrl |= IXGBE_RSCCTL_RSCEN;
1172 /*
1271 ** Limit the total number of descriptors that
1272 ** can be combined, so it does not exceed 64K
1273 */
1173 * Limit the total number of descriptors that
1174 * can be combined, so it does not exceed 64K
1175 */
1274 if (rxr->mbuf_sz == MCLBYTES)
1275 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1276 else if (rxr->mbuf_sz == MJUMPAGESIZE)
1277 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1278 else if (rxr->mbuf_sz == MJUM9BYTES)
1279 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1280 else /* Using 16K cluster */
1281 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
1282
1283 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1284
1285 /* Enable TCP header recognition */
1286 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1176 if (rxr->mbuf_sz == MCLBYTES)
1177 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1178 else if (rxr->mbuf_sz == MJUMPAGESIZE)
1179 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1180 else if (rxr->mbuf_sz == MJUM9BYTES)
1181 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1182 else /* Using 16K cluster */
1183 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
1184
1185 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1186
1187 /* Enable TCP header recognition */
1188 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1287 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
1288 IXGBE_PSRTYPE_TCPHDR));
1189 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1289
1290 /* Disable RSC for ACK packets */
1291 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1292 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1293
1294 rxr->hw_rsc = TRUE;
1190
1191 /* Disable RSC for ACK packets */
1192 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1193 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1194
1195 rxr->hw_rsc = TRUE;
1295}
1196} /* ixgbe_setup_hw_rsc */
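/*
 * Editor's illustrative sketch (not part of the driver source): choosing an
 * RSC maximum-descriptor limit from the RX cluster size so a coalesced frame
 * stays well under the 64K ceiling noted in ixgbe_setup_hw_rsc() above.  The
 * cluster sizes correspond to the usual FreeBSD MCLBYTES/MJUMPAGESIZE (on
 * 4KB-page systems)/MJUM9BYTES/MJUM16BYTES values; the real code ORs the
 * hardware IXGBE_RSCCTL_MAXDESC_* bits instead of returning a count.
 */
#include <stdio.h>

static int
sketch_rsc_maxdesc(int mbuf_sz)
{
	if (mbuf_sz == 2048)		/* 2KB cluster */
		return (16);		/* 16 * 2KB = 32KB */
	else if (mbuf_sz == 4096)	/* page-size cluster */
		return (8);		/*  8 * 4KB = 32KB */
	else if (mbuf_sz == 9216)	/* 9KB jumbo cluster */
		return (4);		/*  4 * 9KB = 36KB */
	else				/* 16KB cluster */
		return (1);
}

int
main(void)
{
	int sizes[] = { 2048, 4096, 9216, 16384 };

	for (int i = 0; i < 4; i++)
		printf("mbuf_sz=%5d -> at most %2d descriptors per RSC\n",
		    sizes[i], sketch_rsc_maxdesc(sizes[i]));
	return (0);
}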
1296
1197
1297/*********************************************************************
1198/************************************************************************
1199 * ixgbe_refresh_mbufs
1298 *
1200 *
1299 * Refresh mbuf buffers for RX descriptor rings
1300 * - now keeps its own state so discards due to resource
1301 * exhaustion are unnecessary; if an mbuf cannot be obtained,
1302 * it just returns, keeping its placeholder, so it can simply
1303 * be recalled to try again.
1304 *
1305 **********************************************************************/
1201 * Refresh mbuf buffers for RX descriptor rings
1202 * - now keeps its own state so discards due to resource
1203 * exhaustion are unnecessary; if an mbuf cannot be obtained,
1204 * it just returns, keeping its placeholder, so it can simply
1205 * be recalled to try again.
1206 ************************************************************************/
1306static void
1307ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1308{
1207static void
1208ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1209{
1309 struct adapter *adapter = rxr->adapter;
1310 bus_dma_segment_t seg[1];
1311 struct ixgbe_rx_buf *rxbuf;
1312 struct mbuf *mp;
1313 int i, j, nsegs, error;
1314 bool refreshed = FALSE;
1210 struct adapter *adapter = rxr->adapter;
1211 struct ixgbe_rx_buf *rxbuf;
1212 struct mbuf *mp;
1213 bus_dma_segment_t seg[1];
1214 int i, j, nsegs, error;
1215 bool refreshed = FALSE;
1315
1316 i = j = rxr->next_to_refresh;
1317 /* Control the loop with one beyond */
1318 if (++j == rxr->num_desc)
1319 j = 0;
1320
1321 while (j != limit) {
1322 rxbuf = &rxr->rx_buffers[i];
1323 if (rxbuf->buf == NULL) {
1216
1217 i = j = rxr->next_to_refresh;
1218 /* Control the loop with one beyond */
1219 if (++j == rxr->num_desc)
1220 j = 0;
1221
1222 while (j != limit) {
1223 rxbuf = &rxr->rx_buffers[i];
1224 if (rxbuf->buf == NULL) {
1324 mp = m_getjcl(M_NOWAIT, MT_DATA,
1325 M_PKTHDR, rxr->mbuf_sz);
1225 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1226 rxr->mbuf_sz);
1326 if (mp == NULL)
1327 goto update;
1328 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1329 m_adj(mp, ETHER_ALIGN);
1330 } else
1331 mp = rxbuf->buf;
1332
1333 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1334
1335 /* If we're dealing with an mbuf that was copied rather
1336 * than replaced, there's no need to go through busdma.
1337 */
1338 if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1339 /* Get the memory mapping */
1340 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
1227 if (mp == NULL)
1228 goto update;
1229 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1230 m_adj(mp, ETHER_ALIGN);
1231 } else
1232 mp = rxbuf->buf;
1233
1234 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1235
1236 /* If we're dealing with an mbuf that was copied rather
1237 * than replaced, there's no need to go through busdma.
1238 */
1239 if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1240 /* Get the memory mapping */
1241 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
1341 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
1342 rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT);
1242 error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap,
1243 mp, seg, &nsegs, BUS_DMA_NOWAIT);
1343 if (error != 0) {
1244 if (error != 0) {
1344 printf("Refresh mbufs: payload dmamap load"
1345 " failure - %d\n", error);
1245 printf("Refresh mbufs: payload dmamap load failure - %d\n", error);
1346 m_free(mp);
1347 rxbuf->buf = NULL;
1348 goto update;
1349 }
1350 rxbuf->buf = mp;
1351 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
1352 BUS_DMASYNC_PREREAD);
1353 rxbuf->addr = rxr->rx_base[i].read.pkt_addr =

--- 5 unchanged lines hidden ---

1359
1360 refreshed = TRUE;
1361 /* Next is precalculated */
1362 i = j;
1363 rxr->next_to_refresh = i;
1364 if (++j == rxr->num_desc)
1365 j = 0;
1366 }
1246 m_free(mp);
1247 rxbuf->buf = NULL;
1248 goto update;
1249 }
1250 rxbuf->buf = mp;
1251 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
1252 BUS_DMASYNC_PREREAD);
1253 rxbuf->addr = rxr->rx_base[i].read.pkt_addr =

--- 5 unchanged lines hidden ---

1259
1260 refreshed = TRUE;
1261 /* Next is precalculated */
1262 i = j;
1263 rxr->next_to_refresh = i;
1264 if (++j == rxr->num_desc)
1265 j = 0;
1266 }
1267
1367update:
1368 if (refreshed) /* Update hardware tail index */
1268update:
1269 if (refreshed) /* Update hardware tail index */
1369 IXGBE_WRITE_REG(&adapter->hw,
1370 rxr->tail, rxr->next_to_refresh);
1270 IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
1271
1371 return;
1272 return;
1372}
1273} /* ixgbe_refresh_mbufs */
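/*
 * Editor's illustrative sketch (not part of the driver source): the
 * "control the loop with one beyond" indexing used by ixgbe_refresh_mbufs()
 * above.  'i' is the slot being refreshed, 'j' runs one position ahead, and
 * the loop stops when 'j' reaches the caller-supplied limit; on an
 * allocation failure the routine simply returns, keeping its placeholder,
 * and can be called again later.  Ring size, start slot and limit are
 * example values.
 */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	const int num_desc = 8;		/* example ring size */
	int next_to_refresh = 6;	/* example starting slot */
	int limit = 3;			/* refresh up to (but not including) 3 */
	int i, j;

	i = j = next_to_refresh;
	if (++j == num_desc)		/* 'j' is one beyond 'i' */
		j = 0;

	while (j != limit) {
		bool got_buffer = true;	/* stand-in for m_getjcl() succeeding */

		if (!got_buffer)
			break;		/* keep the placeholder, retry later */
		printf("refreshed slot %d\n", i);
		i = j;			/* next is precalculated */
		next_to_refresh = i;
		if (++j == num_desc)
			j = 0;
	}
	printf("next_to_refresh = %d\n", next_to_refresh);
	return (0);
}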
1373
1274
1374/*********************************************************************
1275/************************************************************************
1276 * ixgbe_allocate_receive_buffers
1375 *
1277 *
1376 * Allocate memory for rx_buffer structures. Since we use one
1377 * rx_buffer per received packet, the maximum number of rx_buffer's
1378 * that we'll need is equal to the number of receive descriptors
1379 * that we've allocated.
1380 *
1381 **********************************************************************/
1382int
1278 * Allocate memory for rx_buffer structures. Since we use one
1279 * rx_buffer per received packet, the maximum number of rx_buffer's
1280 * that we'll need is equal to the number of receive descriptors
1281 * that we've allocated.
1282 ************************************************************************/
1283static int
1383ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1384{
1284ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1285{
1385 struct adapter *adapter = rxr->adapter;
1386 device_t dev = adapter->dev;
1387 struct ixgbe_rx_buf *rxbuf;
1388 int bsize, error;
1286 struct adapter *adapter = rxr->adapter;
1287 device_t dev = adapter->dev;
1288 struct ixgbe_rx_buf *rxbuf;
1289 int bsize, error;
1389
1390 bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1290
1291 bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1391 if (!(rxr->rx_buffers =
1392 (struct ixgbe_rx_buf *) malloc(bsize,
1393 M_DEVBUF, M_NOWAIT | M_ZERO))) {
1292 rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
1293 M_NOWAIT | M_ZERO);
1294 if (rxr->rx_buffers == NULL) {
1394 device_printf(dev, "Unable to allocate rx_buffer memory\n");
1395 error = ENOMEM;
1396 goto fail;
1397 }
1398
1295 device_printf(dev, "Unable to allocate rx_buffer memory\n");
1296 error = ENOMEM;
1297 goto fail;
1298 }
1299
1399 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1400 1, 0, /* alignment, bounds */
1401 BUS_SPACE_MAXADDR, /* lowaddr */
1402 BUS_SPACE_MAXADDR, /* highaddr */
1403 NULL, NULL, /* filter, filterarg */
1404 MJUM16BYTES, /* maxsize */
1405 1, /* nsegments */
1406 MJUM16BYTES, /* maxsegsize */
1407 0, /* flags */
1408 NULL, /* lockfunc */
1409 NULL, /* lockfuncarg */
1410 &rxr->ptag))) {
1300 error = bus_dma_tag_create(
1301 /* parent */ bus_get_dma_tag(dev),
1302 /* alignment */ 1,
1303 /* bounds */ 0,
1304 /* lowaddr */ BUS_SPACE_MAXADDR,
1305 /* highaddr */ BUS_SPACE_MAXADDR,
1306 /* filter */ NULL,
1307 /* filterarg */ NULL,
1308 /* maxsize */ MJUM16BYTES,
1309 /* nsegments */ 1,
1310 /* maxsegsize */ MJUM16BYTES,
1311 /* flags */ 0,
1312 /* lockfunc */ NULL,
1313 /* lockfuncarg */ NULL,
1314 &rxr->ptag);
1315 if (error != 0) {
1411 device_printf(dev, "Unable to create RX DMA tag\n");
1412 goto fail;
1413 }
1414
1415 for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
1416 rxbuf = &rxr->rx_buffers[i];
1417 error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1418 if (error) {
1419 device_printf(dev, "Unable to create RX dma map\n");
1420 goto fail;
1421 }
1422 }
1423
1424 return (0);
1425
1426fail:
1427 /* Frees all, but can handle partial completion */
1428 ixgbe_free_receive_structures(adapter);
1316 device_printf(dev, "Unable to create RX DMA tag\n");
1317 goto fail;
1318 }
1319
1320 for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
1321 rxbuf = &rxr->rx_buffers[i];
1322 error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1323 if (error) {
1324 device_printf(dev, "Unable to create RX dma map\n");
1325 goto fail;
1326 }
1327 }
1328
1329 return (0);
1330
1331fail:
1332 /* Frees all, but can handle partial completion */
1333 ixgbe_free_receive_structures(adapter);
1334
1429 return (error);
1335 return (error);
1430}
1336} /* ixgbe_allocate_receive_buffers */
1431
1337
1432static void
1338/************************************************************************
1339 * ixgbe_free_receive_ring
1340 ************************************************************************/
1341static void
1433ixgbe_free_receive_ring(struct rx_ring *rxr)
1342ixgbe_free_receive_ring(struct rx_ring *rxr)
1434{
1435 struct ixgbe_rx_buf *rxbuf;
1436
1343{
1437 for (int i = 0; i < rxr->num_desc; i++) {
1344 for (int i = 0; i < rxr->num_desc; i++) {
1438 rxbuf = &rxr->rx_buffers[i];
1439 if (rxbuf->buf != NULL) {
1440 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
1441 BUS_DMASYNC_POSTREAD);
1442 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
1443 rxbuf->buf->m_flags |= M_PKTHDR;
1444 m_freem(rxbuf->buf);
1445 rxbuf->buf = NULL;
1446 rxbuf->flags = 0;
1447 }
1345 ixgbe_rx_discard(rxr, i);
1448 }
1346 }
1449}
1347} /* ixgbe_free_receive_ring */
1450
1348
1451/*********************************************************************
1349/************************************************************************
1350 * ixgbe_setup_receive_ring
1452 *
1351 *
1453 * Initialize a receive ring and its buffers.
1454 *
1455 **********************************************************************/
1352 * Initialize a receive ring and its buffers.
1353 ************************************************************************/
1456static int
1457ixgbe_setup_receive_ring(struct rx_ring *rxr)
1458{
1354static int
1355ixgbe_setup_receive_ring(struct rx_ring *rxr)
1356{
1459 struct adapter *adapter;
1460 struct ifnet *ifp;
1461 device_t dev;
1462 struct ixgbe_rx_buf *rxbuf;
1463 bus_dma_segment_t seg[1];
1464 struct lro_ctrl *lro = &rxr->lro;
1465 int rsize, nsegs, error = 0;
1357 struct adapter *adapter;
1358 struct ifnet *ifp;
1359 device_t dev;
1360 struct ixgbe_rx_buf *rxbuf;
1361 struct lro_ctrl *lro = &rxr->lro;
1466#ifdef DEV_NETMAP
1467 struct netmap_adapter *na = NA(rxr->adapter->ifp);
1362#ifdef DEV_NETMAP
1363 struct netmap_adapter *na = NA(rxr->adapter->ifp);
1468 struct netmap_slot *slot;
1364 struct netmap_slot *slot;
1469#endif /* DEV_NETMAP */
1365#endif /* DEV_NETMAP */
1366 bus_dma_segment_t seg[1];
1367 int rsize, nsegs, error = 0;
1470
1471 adapter = rxr->adapter;
1472 ifp = adapter->ifp;
1473 dev = adapter->dev;
1474
1475 /* Clear the ring contents */
1476 IXGBE_RX_LOCK(rxr);
1368
1369 adapter = rxr->adapter;
1370 ifp = adapter->ifp;
1371 dev = adapter->dev;
1372
1373 /* Clear the ring contents */
1374 IXGBE_RX_LOCK(rxr);
1375
1477#ifdef DEV_NETMAP
1376#ifdef DEV_NETMAP
1478 /* same as in ixgbe_setup_transmit_ring() */
1479 slot = netmap_reset(na, NR_RX, rxr->me, 0);
1377 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1378 slot = netmap_reset(na, NR_RX, rxr->me, 0);
1480#endif /* DEV_NETMAP */
1379#endif /* DEV_NETMAP */
1380
1481 rsize = roundup2(adapter->num_rx_desc *
1482 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1483 bzero((void *)rxr->rx_base, rsize);
1484 /* Cache the size */
1485 rxr->mbuf_sz = adapter->rx_mbuf_sz;
1486
1487 /* Free current RX buffer structs and their mbufs */
1488 ixgbe_free_receive_ring(rxr);
1489
1490 /* Now replenish the mbufs */
1491 for (int j = 0; j != rxr->num_desc; ++j) {
1381 rsize = roundup2(adapter->num_rx_desc *
1382 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1383 bzero((void *)rxr->rx_base, rsize);
1384 /* Cache the size */
1385 rxr->mbuf_sz = adapter->rx_mbuf_sz;
1386
1387 /* Free current RX buffer structs and their mbufs */
1388 ixgbe_free_receive_ring(rxr);
1389
1390 /* Now replenish the mbufs */
1391 for (int j = 0; j != rxr->num_desc; ++j) {
1492 struct mbuf *mp;
1392 struct mbuf *mp;
1493
1494 rxbuf = &rxr->rx_buffers[j];
1393
1394 rxbuf = &rxr->rx_buffers[j];
1395
1495#ifdef DEV_NETMAP
1496 /*
1497 * In netmap mode, fill the map and set the buffer
1498 * address in the NIC ring, considering the offset
1499 * between the netmap and NIC rings (see comment in
1500 * ixgbe_setup_transmit_ring() ). No need to allocate
1501 * an mbuf, so end the block with a continue;
1502 */
1396#ifdef DEV_NETMAP
1397 /*
1398 * In netmap mode, fill the map and set the buffer
1399 * address in the NIC ring, considering the offset
1400 * between the netmap and NIC rings (see comment in
1401 * ixgbe_setup_transmit_ring() ). No need to allocate
1402 * an mbuf, so end the block with a continue;
1403 */
1503 if (slot) {
1404 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1504 int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1505 uint64_t paddr;
1506 void *addr;
1507
1508 addr = PNMB(na, slot + sj, &paddr);
1509 netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1510 /* Update descriptor and the cached value */
1511 rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1512 rxbuf->addr = htole64(paddr);
1513 continue;
1514 }
1515#endif /* DEV_NETMAP */
1405 int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
1406 uint64_t paddr;
1407 void *addr;
1408
1409 addr = PNMB(na, slot + sj, &paddr);
1410 netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1411 /* Update descriptor and the cached value */
1412 rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1413 rxbuf->addr = htole64(paddr);
1414 continue;
1415 }
1416#endif /* DEV_NETMAP */
1516 rxbuf->flags = 0;
1517 rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA,
1518 M_PKTHDR, adapter->rx_mbuf_sz);
1417
1418 rxbuf->flags = 0;
1419 rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1420 adapter->rx_mbuf_sz);
1519 if (rxbuf->buf == NULL) {
1520 error = ENOBUFS;
1421 if (rxbuf->buf == NULL) {
1422 error = ENOBUFS;
1521 goto fail;
1423 goto fail;
1522 }
1523 mp = rxbuf->buf;
1524 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1525 /* Get the memory mapping */
1424 }
1425 mp = rxbuf->buf;
1426 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1427 /* Get the memory mapping */
1526 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
1527 rxbuf->pmap, mp, seg,
1428 error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, seg,
1528 &nsegs, BUS_DMA_NOWAIT);
1529 if (error != 0)
1429 &nsegs, BUS_DMA_NOWAIT);
1430 if (error != 0)
1530 goto fail;
1531 bus_dmamap_sync(rxr->ptag,
1532 rxbuf->pmap, BUS_DMASYNC_PREREAD);
1431 goto fail;
1432 bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD);
1533 /* Update the descriptor and the cached value */
1534 rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
1535 rxbuf->addr = htole64(seg[0].ds_addr);
1536 }
1537
1538
1539 /* Setup our descriptor indices */
1540 rxr->next_to_check = 0;
1541 rxr->next_to_refresh = 0;
1542 rxr->lro_enabled = FALSE;
1543 rxr->rx_copies = 0;
1544 rxr->rx_bytes = 0;
1545 rxr->vtag_strip = FALSE;
1546
1547 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1548 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1549
1550 /*
1433 /* Update the descriptor and the cached value */
1434 rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
1435 rxbuf->addr = htole64(seg[0].ds_addr);
1436 }
1437
1438
1439 /* Setup our descriptor indices */
1440 rxr->next_to_check = 0;
1441 rxr->next_to_refresh = 0;
1442 rxr->lro_enabled = FALSE;
1443 rxr->rx_copies = 0;
1444 rxr->rx_bytes = 0;
1445 rxr->vtag_strip = FALSE;
1446
1447 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1448 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1449
1450 /*
1551 ** Now set up the LRO interface:
1552 */
1451 * Now set up the LRO interface
1452 */
1553 if (ixgbe_rsc_enable)
1554 ixgbe_setup_hw_rsc(rxr);
1555 else if (ifp->if_capenable & IFCAP_LRO) {
1556 int err = tcp_lro_init(lro);
1557 if (err) {
1558 device_printf(dev, "LRO Initialization failed!\n");
1559 goto fail;
1560 }
1561 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1562 rxr->lro_enabled = TRUE;
1563 lro->ifp = adapter->ifp;
1564 }
1565
1566 IXGBE_RX_UNLOCK(rxr);
1453 if (ixgbe_rsc_enable)
1454 ixgbe_setup_hw_rsc(rxr);
1455 else if (ifp->if_capenable & IFCAP_LRO) {
1456 int err = tcp_lro_init(lro);
1457 if (err) {
1458 device_printf(dev, "LRO Initialization failed!\n");
1459 goto fail;
1460 }
1461 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1462 rxr->lro_enabled = TRUE;
1463 lro->ifp = adapter->ifp;
1464 }
1465
1466 IXGBE_RX_UNLOCK(rxr);
1467
1567 return (0);
1568
1569fail:
1570 ixgbe_free_receive_ring(rxr);
1571 IXGBE_RX_UNLOCK(rxr);
1468 return (0);
1469
1470fail:
1471 ixgbe_free_receive_ring(rxr);
1472 IXGBE_RX_UNLOCK(rxr);
1473
1572 return (error);
1474 return (error);
1573}
1475} /* ixgbe_setup_receive_ring */
1574
1476
1575/*********************************************************************
1576 *
1577 * Initialize all receive rings.
1578 *
1579 **********************************************************************/
1477/************************************************************************
1478 * ixgbe_setup_receive_structures - Initialize all receive rings.
1479 ************************************************************************/
1580int
1581ixgbe_setup_receive_structures(struct adapter *adapter)
1582{
1583 struct rx_ring *rxr = adapter->rx_rings;
1480int
1481ixgbe_setup_receive_structures(struct adapter *adapter)
1482{
1483 struct rx_ring *rxr = adapter->rx_rings;
1584 int j;
1484 int j;
1585
1586 for (j = 0; j < adapter->num_queues; j++, rxr++)
1587 if (ixgbe_setup_receive_ring(rxr))
1588 goto fail;
1589
1590 return (0);
1591fail:
1592 /*
1593 * Free RX buffers allocated so far; we will only handle
1594 * the rings that completed, since the failing case will have
1595 * cleaned up for itself. 'j' failed, so it's the terminus.
1596 */
1597 for (int i = 0; i < j; ++i) {
1598 rxr = &adapter->rx_rings[i];
1485
1486 for (j = 0; j < adapter->num_queues; j++, rxr++)
1487 if (ixgbe_setup_receive_ring(rxr))
1488 goto fail;
1489
1490 return (0);
1491fail:
1492 /*
1493 * Free RX buffers allocated so far; we will only handle
1494 * the rings that completed, since the failing case will have
1495 * cleaned up for itself. 'j' failed, so it's the terminus.
1496 */
1497 for (int i = 0; i < j; ++i) {
1498 rxr = &adapter->rx_rings[i];
1499 IXGBE_RX_LOCK(rxr);
1599 ixgbe_free_receive_ring(rxr);
1500 ixgbe_free_receive_ring(rxr);
1501 IXGBE_RX_UNLOCK(rxr);
1600 }
1601
1602 return (ENOBUFS);
1502 }
1503
1504 return (ENOBUFS);
1603}
1505} /* ixgbe_setup_receive_structures */
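/*
 * Editor's illustrative sketch (not part of the driver source): the
 * partial-failure cleanup pattern of ixgbe_setup_receive_structures()
 * above.  Ring 'j' is the one that failed (and cleaned up after itself),
 * so only the rings [0, j) that completed setup are torn down here.  The
 * queue count and the "ring 2 fails" behavior are contrived for the demo.
 */
#include <stdio.h>

#define SKETCH_NUM_QUEUES 4

static int
sketch_setup_ring(int idx)
{
	return (idx == 2 ? -1 : 0);	/* pretend ring 2 fails */
}

static void
sketch_free_ring(int idx)
{
	printf("freeing ring %d\n", idx);
}

int
main(void)
{
	int j;

	for (j = 0; j < SKETCH_NUM_QUEUES; j++)
		if (sketch_setup_ring(j) != 0)
			goto fail;
	printf("all rings ready\n");
	return (0);

fail:
	/* 'j' failed and cleaned up for itself; undo the ones before it. */
	for (int i = 0; i < j; i++)
		sketch_free_ring(i);
	return (1);
}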
1604
1605
1506
1507
1606/*********************************************************************
1607 *
1608 * Free all receive rings.
1609 *
1610 **********************************************************************/
1508/************************************************************************
1509 * ixgbe_free_receive_structures - Free all receive rings.
1510 ************************************************************************/
1611void
1612ixgbe_free_receive_structures(struct adapter *adapter)
1613{
1614 struct rx_ring *rxr = adapter->rx_rings;
1615
1616 INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1617
1618 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1511void
1512ixgbe_free_receive_structures(struct adapter *adapter)
1513{
1514 struct rx_ring *rxr = adapter->rx_rings;
1515
1516 INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1517
1518 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1619 struct lro_ctrl *lro = &rxr->lro;
1620 ixgbe_free_receive_buffers(rxr);
1621 /* Free LRO memory */
1519 ixgbe_free_receive_buffers(rxr);
1520 /* Free LRO memory */
1622 tcp_lro_free(lro);
1521 tcp_lro_free(&rxr->lro);
1623 /* Free the ring memory as well */
1624 ixgbe_dma_free(adapter, &rxr->rxdma);
1625 }
1626
1627 free(adapter->rx_rings, M_DEVBUF);
1522 /* Free the ring memory as well */
1523 ixgbe_dma_free(adapter, &rxr->rxdma);
1524 }
1525
1526 free(adapter->rx_rings, M_DEVBUF);
1628}
1527} /* ixgbe_free_receive_structures */
1629
1630
1528
1529
1631/*********************************************************************
1632 *
1633 * Free receive ring data structures
1634 *
1635 **********************************************************************/
1636void
1530/************************************************************************
1531 * ixgbe_free_receive_buffers - Free receive ring data structures
1532 ************************************************************************/
1533static void
1637ixgbe_free_receive_buffers(struct rx_ring *rxr)
1638{
1534ixgbe_free_receive_buffers(struct rx_ring *rxr)
1535{
1639 struct adapter *adapter = rxr->adapter;
1640 struct ixgbe_rx_buf *rxbuf;
1536 struct adapter *adapter = rxr->adapter;
1537 struct ixgbe_rx_buf *rxbuf;
1641
1642 INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1643
1644 /* Cleanup any existing buffers */
1645 if (rxr->rx_buffers != NULL) {
1646 for (int i = 0; i < adapter->num_rx_desc; i++) {
1647 rxbuf = &rxr->rx_buffers[i];
1538
1539 INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1540
1541 /* Cleanup any existing buffers */
1542 if (rxr->rx_buffers != NULL) {
1543 for (int i = 0; i < adapter->num_rx_desc; i++) {
1544 rxbuf = &rxr->rx_buffers[i];
1648 if (rxbuf->buf != NULL) {
1649 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
1650 BUS_DMASYNC_POSTREAD);
1651 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
1652 rxbuf->buf->m_flags |= M_PKTHDR;
1653 m_freem(rxbuf->buf);
1654 }
1655 rxbuf->buf = NULL;
1545 ixgbe_rx_discard(rxr, i);
1656 if (rxbuf->pmap != NULL) {
1657 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1658 rxbuf->pmap = NULL;
1659 }
1660 }
1661 if (rxr->rx_buffers != NULL) {
1662 free(rxr->rx_buffers, M_DEVBUF);
1663 rxr->rx_buffers = NULL;
1664 }
1665 }
1666
1667 if (rxr->ptag != NULL) {
1668 bus_dma_tag_destroy(rxr->ptag);
1669 rxr->ptag = NULL;
1670 }
1671
1672 return;
1546 if (rxbuf->pmap != NULL) {
1547 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1548 rxbuf->pmap = NULL;
1549 }
1550 }
1551 if (rxr->rx_buffers != NULL) {
1552 free(rxr->rx_buffers, M_DEVBUF);
1553 rxr->rx_buffers = NULL;
1554 }
1555 }
1556
1557 if (rxr->ptag != NULL) {
1558 bus_dma_tag_destroy(rxr->ptag);
1559 rxr->ptag = NULL;
1560 }
1561
1562 return;
1673}
1563} /* ixgbe_free_receive_buffers */
1674
1564
1565/************************************************************************
1566 * ixgbe_rx_input
1567 ************************************************************************/
1675static __inline void
1568static __inline void
1676ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
1569ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1570 u32 ptype)
1677{
1571{
1678
1679 /*
1680 * At the moment, LRO is only for IP/TCP packets and the TCP checksum of the packet
1681 * should be computed by hardware. Also it should not have VLAN tag in
1682 * ethernet header. In case of IPv6 we do not yet support ext. hdrs.
1683 */
1684 if (rxr->lro_enabled &&
1685 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1686 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1687 ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1688 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1689 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1690 (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1691 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1692 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1693 /*
1694 * Send to the stack if:
1695 ** - LRO not enabled, or
1696 ** - no LRO resources, or
1697 ** - lro enqueue fails
1698 */
1699 if (rxr->lro.lro_cnt != 0)
1700 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1701 return;
1702 }
1572 /*
1573 * At the moment, LRO is only for IP/TCP packets and the TCP checksum of the packet
1574 * should be computed by hardware. Also it should not have VLAN tag in
1575 * ethernet header. In case of IPv6 we do not yet support ext. hdrs.
1576 */
1577 if (rxr->lro_enabled &&
1578 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1579 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1580 ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1581 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1582 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1583 (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1584 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1585 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1586 /*
1587 * Send to the stack if:
1588 * - LRO not enabled, or
1589 * - no LRO resources, or
1590 * - lro enqueue fails
1591 */
1592 if (rxr->lro.lro_cnt != 0)
1593 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1594 return;
1595 }
1703 IXGBE_RX_UNLOCK(rxr);
1596 IXGBE_RX_UNLOCK(rxr);
1704 (*ifp->if_input)(ifp, m);
1597 (*ifp->if_input)(ifp, m);
1705 IXGBE_RX_LOCK(rxr);
1598 IXGBE_RX_LOCK(rxr);
1706}
1599} /* ixgbe_rx_input */
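/*
 * Editor's illustrative sketch (not part of the driver source): the gating
 * logic of ixgbe_rx_input() above reduced to a predicate.  A frame is only
 * offered to software LRO when LRO is enabled, hardware validated the L4
 * checksum, the packet type is plain TCP over IPv4 or IPv6, and it did not
 * match an ETQF filter; the driver additionally requires VLAN hardware
 * tagging so no VLAN header remains in the frame.  Everything else (or a
 * failed lro enqueue) goes straight to if_input().
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_rx_meta {
	bool lro_enabled;
	bool hw_csum_valid;	/* CSUM_DATA_VALID | CSUM_PSEUDO_HDR set */
	bool is_etqf;		/* matched an ETQF filter */
	bool is_tcp_ipv4;
	bool is_tcp_ipv6;
};

static bool
sketch_lro_candidate(const struct sketch_rx_meta *m)
{
	return (m->lro_enabled && m->hw_csum_valid && !m->is_etqf &&
	    (m->is_tcp_ipv4 || m->is_tcp_ipv6));
}

int
main(void)
{
	struct sketch_rx_meta m = { true, true, false, true, false };

	printf("LRO candidate: %s\n",
	    sketch_lro_candidate(&m) ? "yes" : "no");
	return (0);
}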
1707
1600
1601/************************************************************************
1602 * ixgbe_rx_discard
1603 ************************************************************************/
1708static __inline void
1709ixgbe_rx_discard(struct rx_ring *rxr, int i)
1710{
1604static __inline void
1605ixgbe_rx_discard(struct rx_ring *rxr, int i)
1606{
1711 struct ixgbe_rx_buf *rbuf;
1607 struct ixgbe_rx_buf *rbuf;
1712
1713 rbuf = &rxr->rx_buffers[i];
1714
1608
1609 rbuf = &rxr->rx_buffers[i];
1610
1715
1716 /*
1611 /*
1717 ** With advanced descriptors the writeback
1718 ** clobbers the buffer addresses, so it's easier
1719 ** to just free the existing mbufs and take
1720 ** the normal refresh path to get new buffers
1721 ** and mapping.
1722 */
1612 * With advanced descriptors the writeback
1613 * clobbers the buffer addresses, so it's easier
1614 * to just free the existing mbufs and take
1615 * the normal refresh path to get new buffers
1616 * and mapping.
1617 */
1723
1724 if (rbuf->fmp != NULL) {/* Partial chain ? */
1618
1619 if (rbuf->fmp != NULL) {/* Partial chain ? */
1725 rbuf->fmp->m_flags |= M_PKTHDR;
1620 bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1726 m_freem(rbuf->fmp);
1727 rbuf->fmp = NULL;
1728 rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1729 } else if (rbuf->buf) {
1621 m_freem(rbuf->fmp);
1622 rbuf->fmp = NULL;
1623 rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1624 } else if (rbuf->buf) {
1625 bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1730 m_free(rbuf->buf);
1731 rbuf->buf = NULL;
1732 }
1733 bus_dmamap_unload(rxr->ptag, rbuf->pmap);
1734
1735 rbuf->flags = 0;
1626 m_free(rbuf->buf);
1627 rbuf->buf = NULL;
1628 }
1629 bus_dmamap_unload(rxr->ptag, rbuf->pmap);
1630
1631 rbuf->flags = 0;
1736
1632
1737 return;
1633 return;
1738}
1634} /* ixgbe_rx_discard */
1739
1740
1635
1636
1741/*********************************************************************
1637/************************************************************************
1638 * ixgbe_rxeof
1742 *
1639 *
1743 * This routine executes in interrupt context. It replenishes
1744 * the mbufs in the descriptor and sends data which has been
1745 * dma'ed into host memory to upper layer.
1640 * Executes in interrupt context. It replenishes the
1641 * mbufs in the descriptor and sends data which has
1642 * been dma'ed into host memory to upper layer.
1746 *
1643 *
1747 * Return TRUE for more work, FALSE for all clean.
1748 *********************************************************************/
1644 * Return TRUE for more work, FALSE for all clean.
1645 ************************************************************************/
1749bool
1750ixgbe_rxeof(struct ix_queue *que)
1751{
1646bool
1647ixgbe_rxeof(struct ix_queue *que)
1648{
1752 struct adapter *adapter = que->adapter;
1753 struct rx_ring *rxr = que->rxr;
1754 struct ifnet *ifp = adapter->ifp;
1755 struct lro_ctrl *lro = &rxr->lro;
1756 int i, nextp, processed = 0;
1757 u32 staterr = 0;
1758 u32 count = adapter->rx_process_limit;
1759 union ixgbe_adv_rx_desc *cur;
1760 struct ixgbe_rx_buf *rbuf, *nbuf;
1761 u16 pkt_info;
1649 struct adapter *adapter = que->adapter;
1650 struct rx_ring *rxr = que->rxr;
1651 struct ifnet *ifp = adapter->ifp;
1652 struct lro_ctrl *lro = &rxr->lro;
1653 union ixgbe_adv_rx_desc *cur;
1654 struct ixgbe_rx_buf *rbuf, *nbuf;
1655 int i, nextp, processed = 0;
1656 u32 staterr = 0;
1657 u32 count = adapter->rx_process_limit;
1658 u16 pkt_info;
1762
1763 IXGBE_RX_LOCK(rxr);
1764
1765#ifdef DEV_NETMAP
1659
1660 IXGBE_RX_LOCK(rxr);
1661
1662#ifdef DEV_NETMAP
1766 /* Same as the txeof routine: wakeup clients on intr. */
1767 if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1768 IXGBE_RX_UNLOCK(rxr);
1769 return (FALSE);
1663 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1664 /* Same as the txeof routine: wakeup clients on intr. */
1665 if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1666 IXGBE_RX_UNLOCK(rxr);
1667 return (FALSE);
1668 }
1770 }
1771#endif /* DEV_NETMAP */
1772
1773 for (i = rxr->next_to_check; count != 0;) {
1669 }
1670#endif /* DEV_NETMAP */
1671
1672 for (i = rxr->next_to_check; count != 0;) {
1774 struct mbuf *sendmp, *mp;
1775 u32 rsc, ptype;
1776 u16 len;
1777 u16 vtag = 0;
1778 bool eop;
1779
1673 struct mbuf *sendmp, *mp;
1674 u32 rsc, ptype;
1675 u16 len;
1676 u16 vtag = 0;
1677 bool eop;
1678
1780 /* Sync the ring. */
1781 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1782 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1783
1784 cur = &rxr->rx_base[i];
1785 staterr = le32toh(cur->wb.upper.status_error);
1786 pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1787

--- 13 unchanged lines hidden ---

1801 len = le16toh(cur->wb.upper.length);
1802 ptype = le32toh(cur->wb.lower.lo_dword.data) &
1803 IXGBE_RXDADV_PKTTYPE_MASK;
1804 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1805
1806 /* Make sure bad packets are discarded */
1807 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1808#if __FreeBSD_version >= 1100036
1679 /* Sync the ring. */
1680 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1681 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1682
1683 cur = &rxr->rx_base[i];
1684 staterr = le32toh(cur->wb.upper.status_error);
1685 pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1686

--- 13 unchanged lines hidden ---

1700 len = le16toh(cur->wb.upper.length);
1701 ptype = le32toh(cur->wb.lower.lo_dword.data) &
1702 IXGBE_RXDADV_PKTTYPE_MASK;
1703 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1704
1705 /* Make sure bad packets are discarded */
1706 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1707#if __FreeBSD_version >= 1100036
1809 if (IXGBE_IS_VF(adapter))
1708 if (adapter->feat_en & IXGBE_FEATURE_VF)
1810 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1811#endif
1812 rxr->rx_discarded++;
1813 ixgbe_rx_discard(rxr, i);
1814 goto next_desc;
1815 }
1816
1709 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1710#endif
1711 rxr->rx_discarded++;
1712 ixgbe_rx_discard(rxr, i);
1713 goto next_desc;
1714 }
1715
1716 bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
1717
1817 /*
1718 /*
1818 ** On 82599 which supports a hardware
1819 ** LRO (called HW RSC), packets need
1820 ** not be fragmented across sequential
1821 ** descriptors, rather the next descriptor
1822 ** is indicated in bits of the descriptor.
1823 ** This also means that we might proceses
1824 ** more than one packet at a time, something
1825 ** that has never been true before, it
1826 ** required eliminating global chain pointers
1827 ** in favor of what we are doing here. -jfv
1828 */
1719 * On 82599 which supports a hardware
1720 * LRO (called HW RSC), packets need
1721 * not be fragmented across sequential
1722 * descriptors, rather the next descriptor
1723 * is indicated in bits of the descriptor.
1724 * This also means that we might proceses
1725 * more than one packet at a time, something
1726 * that has never been true before, it
1727 * required eliminating global chain pointers
1728 * in favor of what we are doing here. -jfv
1729 */
1829 if (!eop) {
1830 /*
1730 if (!eop) {
1731 /*
1831 ** Figure out the next descriptor
1832 ** of this frame.
1833 */
1732 * Figure out the next descriptor
1733 * of this frame.
1734 */
1834 if (rxr->hw_rsc == TRUE) {
1835 rsc = ixgbe_rsc_count(cur);
1836 rxr->rsc_num += (rsc - 1);
1837 }
1838 if (rsc) { /* Get hardware index */
1735 if (rxr->hw_rsc == TRUE) {
1736 rsc = ixgbe_rsc_count(cur);
1737 rxr->rsc_num += (rsc - 1);
1738 }
1739 if (rsc) { /* Get hardware index */
1839 nextp = ((staterr &
1840 IXGBE_RXDADV_NEXTP_MASK) >>
1740 nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1841 IXGBE_RXDADV_NEXTP_SHIFT);
1842 } else { /* Just sequential */
1843 nextp = i + 1;
1844 if (nextp == adapter->num_rx_desc)
1845 nextp = 0;
1846 }
1847 nbuf = &rxr->rx_buffers[nextp];
1848 prefetch(nbuf);
1849 }
1850 /*
1741 IXGBE_RXDADV_NEXTP_SHIFT);
1742 } else { /* Just sequential */
1743 nextp = i + 1;
1744 if (nextp == adapter->num_rx_desc)
1745 nextp = 0;
1746 }
1747 nbuf = &rxr->rx_buffers[nextp];
1748 prefetch(nbuf);
1749 }
1750 /*
1851 ** Rather than using the fmp/lmp global pointers
1852 ** we now keep the head of a packet chain in the
1853 ** buffer struct and pass this along from one
1854 ** descriptor to the next, until we get EOP.
1855 */
1751 * Rather than using the fmp/lmp global pointers
1752 * we now keep the head of a packet chain in the
1753 * buffer struct and pass this along from one
1754 * descriptor to the next, until we get EOP.
1755 */
1856 mp->m_len = len;
1857 /*
1756 mp->m_len = len;
1757 /*
1858 ** See if there is a stored head
1859 ** that determines what we are
1860 */
1758 * See if there is a stored head
1759 * that determines what we are
1760 */
1861 sendmp = rbuf->fmp;
1862 if (sendmp != NULL) { /* secondary frag */
1863 rbuf->buf = rbuf->fmp = NULL;
1864 mp->m_flags &= ~M_PKTHDR;
1865 sendmp->m_pkthdr.len += mp->m_len;
1866 } else {
1867 /*
1868 * Optimize. This might be a small packet,
1869 * maybe just a TCP ACK. Do a fast copy that
1870 * is cache aligned into a new mbuf, and
1871 * leave the old mbuf+cluster for re-use.
1872 */
1873 if (eop && len <= IXGBE_RX_COPY_LEN) {
1874 sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1875 if (sendmp != NULL) {
1761 sendmp = rbuf->fmp;
1762 if (sendmp != NULL) { /* secondary frag */
1763 rbuf->buf = rbuf->fmp = NULL;
1764 mp->m_flags &= ~M_PKTHDR;
1765 sendmp->m_pkthdr.len += mp->m_len;
1766 } else {
1767 /*
1768 * Optimize. This might be a small packet,
1769 * maybe just a TCP ACK. Do a fast copy that
1770 * is cache aligned into a new mbuf, and
1771 * leave the old mbuf+cluster for re-use.
1772 */
1773 if (eop && len <= IXGBE_RX_COPY_LEN) {
1774 sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1775 if (sendmp != NULL) {
1876 sendmp->m_data +=
1877 IXGBE_RX_COPY_ALIGN;
1878 ixgbe_bcopy(mp->m_data,
1879 sendmp->m_data, len);
1776 sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1777 ixgbe_bcopy(mp->m_data, sendmp->m_data,
1778 len);
1880 sendmp->m_len = len;
1881 rxr->rx_copies++;
1882 rbuf->flags |= IXGBE_RX_COPY;
1883 }
1884 }
1885 if (sendmp == NULL) {
1886 rbuf->buf = rbuf->fmp = NULL;
1887 sendmp = mp;

--- 12 unchanged lines hidden ---

1900 mp->m_next = nbuf->buf;
1901 } else { /* Sending this frame */
1902 sendmp->m_pkthdr.rcvif = ifp;
1903 rxr->rx_packets++;
1904 /* capture data for AIM */
1905 rxr->bytes += sendmp->m_pkthdr.len;
1906 rxr->rx_bytes += sendmp->m_pkthdr.len;
1907 /* Process vlan info */
1779 sendmp->m_len = len;
1780 rxr->rx_copies++;
1781 rbuf->flags |= IXGBE_RX_COPY;
1782 }
1783 }
1784 if (sendmp == NULL) {
1785 rbuf->buf = rbuf->fmp = NULL;
1786 sendmp = mp;

--- 12 unchanged lines hidden ---

1799 mp->m_next = nbuf->buf;
1800 } else { /* Sending this frame */
1801 sendmp->m_pkthdr.rcvif = ifp;
1802 rxr->rx_packets++;
1803 /* capture data for AIM */
1804 rxr->bytes += sendmp->m_pkthdr.len;
1805 rxr->rx_bytes += sendmp->m_pkthdr.len;
1806 /* Process vlan info */
1908 if ((rxr->vtag_strip) &&
1909 (staterr & IXGBE_RXD_STAT_VP))
1807 if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1910 vtag = le16toh(cur->wb.upper.vlan);
1911 if (vtag) {
1912 sendmp->m_pkthdr.ether_vtag = vtag;
1913 sendmp->m_flags |= M_VLANTAG;
1914 }
1915 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1916 ixgbe_rx_checksum(staterr, sendmp, ptype);
1917
1808 vtag = le16toh(cur->wb.upper.vlan);
1809 if (vtag) {
1810 sendmp->m_pkthdr.ether_vtag = vtag;
1811 sendmp->m_flags |= M_VLANTAG;
1812 }
1813 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1814 ixgbe_rx_checksum(staterr, sendmp, ptype);
1815
1918 /*
1919 * In case of multiqueue, we have RXCSUM.PCSD bit set
1920 * and never cleared. This means we have RSS hash
1921 * available to be used.
1922 */
1923 if (adapter->num_queues > 1) {
1924 sendmp->m_pkthdr.flowid =
1925 le32toh(cur->wb.lower.hi_dword.rss);
1926 switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1927 case IXGBE_RXDADV_RSSTYPE_IPV4:
1928 M_HASHTYPE_SET(sendmp,
1929 M_HASHTYPE_RSS_IPV4);
1930 break;
1931 case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1932 M_HASHTYPE_SET(sendmp,
1933 M_HASHTYPE_RSS_TCP_IPV4);
1934 break;
1935 case IXGBE_RXDADV_RSSTYPE_IPV6:
1936 M_HASHTYPE_SET(sendmp,
1937 M_HASHTYPE_RSS_IPV6);
1938 break;
1939 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1940 M_HASHTYPE_SET(sendmp,
1941 M_HASHTYPE_RSS_TCP_IPV6);
1942 break;
1943 case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1944 M_HASHTYPE_SET(sendmp,
1945 M_HASHTYPE_RSS_IPV6_EX);
1946 break;
1947 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1948 M_HASHTYPE_SET(sendmp,
1949 M_HASHTYPE_RSS_TCP_IPV6_EX);
1950 break;
1816 /*
1817 * In case of multiqueue, we have RXCSUM.PCSD bit set
1818 * and never cleared. This means we have RSS hash
1819 * available to be used.
1820 */
1821 if (adapter->num_queues > 1) {
1822 sendmp->m_pkthdr.flowid =
1823 le32toh(cur->wb.lower.hi_dword.rss);
1824 switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1825 case IXGBE_RXDADV_RSSTYPE_IPV4:
1826 M_HASHTYPE_SET(sendmp,
1827 M_HASHTYPE_RSS_IPV4);
1828 break;
1829 case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1830 M_HASHTYPE_SET(sendmp,
1831 M_HASHTYPE_RSS_TCP_IPV4);
1832 break;
1833 case IXGBE_RXDADV_RSSTYPE_IPV6:
1834 M_HASHTYPE_SET(sendmp,
1835 M_HASHTYPE_RSS_IPV6);
1836 break;
1837 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
1838 M_HASHTYPE_SET(sendmp,
1839 M_HASHTYPE_RSS_TCP_IPV6);
1840 break;
1841 case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
1842 M_HASHTYPE_SET(sendmp,
1843 M_HASHTYPE_RSS_IPV6_EX);
1844 break;
1845 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
1846 M_HASHTYPE_SET(sendmp,
1847 M_HASHTYPE_RSS_TCP_IPV6_EX);
1848 break;
1951#if __FreeBSD_version > 1100000
1849#if __FreeBSD_version > 1100000
1952 case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
1953 M_HASHTYPE_SET(sendmp,
1954 M_HASHTYPE_RSS_UDP_IPV4);
1955 break;
1956 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
1957 M_HASHTYPE_SET(sendmp,
1958 M_HASHTYPE_RSS_UDP_IPV6);
1959 break;
1960 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
1961 M_HASHTYPE_SET(sendmp,
1962 M_HASHTYPE_RSS_UDP_IPV6_EX);
1963 break;
1850 case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
1851 M_HASHTYPE_SET(sendmp,
1852 M_HASHTYPE_RSS_UDP_IPV4);
1853 break;
1854 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
1855 M_HASHTYPE_SET(sendmp,
1856 M_HASHTYPE_RSS_UDP_IPV6);
1857 break;
1858 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
1859 M_HASHTYPE_SET(sendmp,
1860 M_HASHTYPE_RSS_UDP_IPV6_EX);
1861 break;
1964#endif
1862#endif
1965 default:
1966 M_HASHTYPE_SET(sendmp,
1967 M_HASHTYPE_OPAQUE_HASH);
1968 }
1969 } else {
1970 sendmp->m_pkthdr.flowid = que->msix;
1863 default:
1864 M_HASHTYPE_SET(sendmp,
1865 M_HASHTYPE_OPAQUE_HASH);
1866 }
1867 } else {
1868 sendmp->m_pkthdr.flowid = que->msix;
1971 M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
1972 }
1973 }
1974next_desc:
1975 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1976 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1977
1978 /* Advance our pointers to the next descriptor. */
1979 if (++i == rxr->num_desc)
1980 i = 0;
1981
1982 /* Now send to the stack or do LRO */
1983 if (sendmp != NULL) {
1984 rxr->next_to_check = i;
1985 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
1986 i = rxr->next_to_check;
1987 }
1988
1869 M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
1870 }
1871 }
1872next_desc:
1873 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1874 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1875
1876 /* Advance our pointers to the next descriptor. */
1877 if (++i == rxr->num_desc)
1878 i = 0;
1879
1880 /* Now send to the stack or do LRO */
1881 if (sendmp != NULL) {
1882 rxr->next_to_check = i;
1883 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
1884 i = rxr->next_to_check;
1885 }
1886
1989 /* Every 8 descriptors we go to refresh mbufs */
1887 /* Every 8 descriptors we go to refresh mbufs */
1990 if (processed == 8) {
1991 ixgbe_refresh_mbufs(rxr, i);
1992 processed = 0;
1993 }
1994 }
1995
1996 /* Refresh any remaining buf structs */
1997 if (ixgbe_rx_unrefreshed(rxr))

--- 4 unchanged lines hidden ---

2002 /*
2003 * Flush any outstanding LRO work
2004 */
2005 tcp_lro_flush_all(lro);
2006
2007 IXGBE_RX_UNLOCK(rxr);
2008
2009 /*
1888 if (processed == 8) {
1889 ixgbe_refresh_mbufs(rxr, i);
1890 processed = 0;
1891 }
1892 }
1893
1894 /* Refresh any remaining buf structs */
1895 if (ixgbe_rx_unrefreshed(rxr))

--- 4 unchanged lines hidden ---

1900 /*
1901 * Flush any outstanding LRO work
1902 */
1903 tcp_lro_flush_all(lro);
1904
1905 IXGBE_RX_UNLOCK(rxr);
1906
1907 /*
2010 ** Still have cleaning to do?
2011 */
1908 * Still have cleaning to do?
1909 */
2012 if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2013 return (TRUE);
1910 if ((staterr & IXGBE_RXD_STAT_DD) != 0)
1911 return (TRUE);
2014 else
2015 return (FALSE);
2016}
2017
1912
1913 return (FALSE);
1914} /* ixgbe_rxeof */
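/*
 * Editor's illustrative sketch (not part of the driver source): the
 * small-packet copy optimization inside ixgbe_rxeof() above.  Complete
 * frames at or below a copy threshold are copied into a fresh header mbuf
 * so the original receive cluster can be recycled in place; larger frames
 * are handed up zero-copy.  The threshold value is an assumption standing
 * in for IXGBE_RX_COPY_LEN.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_RX_COPY_LEN 160		/* assumed copy-break threshold */

static bool
sketch_copy_break(int len, bool eop)
{
	/* Only complete (EOP) frames short enough are worth copying. */
	return (eop && len <= SKETCH_RX_COPY_LEN);
}

int
main(void)
{
	int lens[] = { 66, 160, 1514 };

	for (int i = 0; i < 3; i++)
		printf("len %4d -> %s\n", lens[i],
		    sketch_copy_break(lens[i], true) ?
		    "copy into small mbuf, recycle cluster" :
		    "hand cluster to the stack (zero-copy)");
	return (0);
}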
2018
1915
2019/*********************************************************************
1916
1917/************************************************************************
1918 * ixgbe_rx_checksum
2020 *
1919 *
2021 * Verify that the hardware indicated that the checksum is valid.
2022 * Inform the stack about the status of the checksum so that the stack
2023 * doesn't spend time verifying the checksum.
2024 *
2025 *********************************************************************/
1920 * Verify that the hardware indicated that the checksum is valid.
1921 * Inform the stack about the status of the checksum so that the stack
1922 * doesn't spend time verifying the checksum.
1923 ************************************************************************/
2026static void
2027ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
2028{
1924static void
1925ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
1926{
2029 u16 status = (u16) staterr;
2030 u8 errors = (u8) (staterr >> 24);
2031 bool sctp = false;
1927 u16 status = (u16)staterr;
1928 u8 errors = (u8)(staterr >> 24);
1929 bool sctp = false;
2032
2033 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2034 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2035 sctp = true;
2036
2037 /* IPv4 checksum */
2038 if (status & IXGBE_RXD_STAT_IPCS) {
2039 mp->m_pkthdr.csum_flags |= CSUM_L3_CALC;

--- 5 unchanged lines hidden (view full) ---

2045 if (status & IXGBE_RXD_STAT_L4CS) {
2046 mp->m_pkthdr.csum_flags |= CSUM_L4_CALC;
2047 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2048 mp->m_pkthdr.csum_flags |= CSUM_L4_VALID;
2049 if (!sctp)
2050 mp->m_pkthdr.csum_data = htons(0xffff);
2051 }
2052 }
1930
1931 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1932 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
1933 sctp = true;
1934
1935 /* IPv4 checksum */
1936 if (status & IXGBE_RXD_STAT_IPCS) {
1937 mp->m_pkthdr.csum_flags |= CSUM_L3_CALC;

--- 5 unchanged lines hidden ---

1943 if (status & IXGBE_RXD_STAT_L4CS) {
1944 mp->m_pkthdr.csum_flags |= CSUM_L4_CALC;
1945 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
1946 mp->m_pkthdr.csum_flags |= CSUM_L4_VALID;
1947 if (!sctp)
1948 mp->m_pkthdr.csum_data = htons(0xffff);
1949 }
1950 }
2053}
1951} /* ixgbe_rx_checksum */
2054
1952
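The CSUM_L3_*/CSUM_L4_* bits set above are the standard <sys/mbuf.h> receive-checksum flags, so upper layers only need to test the packet header rather than recompute anything. A minimal sketch of such a test (editorial illustration; the helper name is hypothetical):

/* Editorial sketch, not part of the diff: interpreting the csum_flags
 * that ixgbe_rx_checksum() leaves on a received mbuf. */
static bool
example_l4_csum_verified(const struct mbuf *mp)
{
	const int want = CSUM_L4_CALC | CSUM_L4_VALID;

	/* Hardware computed the L4 checksum and reported it good. */
	return ((mp->m_pkthdr.csum_flags & want) == want);
}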
2055/********************************************************************
2056 * Manage DMA'able memory.
2057 *******************************************************************/
1953/************************************************************************
1954 * ixgbe_dmamap_cb - Manage DMA'able memory.
1955 ************************************************************************/
2058static void
2059ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2060{
2061 if (error)
2062 return;
1956static void
1957ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1958{
1959 if (error)
1960 return;
2063 *(bus_addr_t *) arg = segs->ds_addr;
1961 *(bus_addr_t *)arg = segs->ds_addr;
1962
2064 return;
1963 return;
2065}
1964} /* ixgbe_dmamap_cb */
2066
1965
2067int
1966/************************************************************************
1967 * ixgbe_dma_malloc
1968 ************************************************************************/
1969static int
2068ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
1970ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2069 struct ixgbe_dma_alloc *dma, int mapflags)
1971 struct ixgbe_dma_alloc *dma, int mapflags)
2070{
2071 device_t dev = adapter->dev;
1972{
1973 device_t dev = adapter->dev;
2072 int r;
1974 int r;
2073
1975
2074 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2075 DBA_ALIGN, 0, /* alignment, bounds */
2076 BUS_SPACE_MAXADDR, /* lowaddr */
2077 BUS_SPACE_MAXADDR, /* highaddr */
2078 NULL, NULL, /* filter, filterarg */
2079 size, /* maxsize */
2080 1, /* nsegments */
2081 size, /* maxsegsize */
2082 BUS_DMA_ALLOCNOW, /* flags */
2083 NULL, /* lockfunc */
2084 NULL, /* lockfuncarg */
2085 &dma->dma_tag);
1976 r = bus_dma_tag_create(
1977 /* parent */ bus_get_dma_tag(adapter->dev),
1978 /* alignment */ DBA_ALIGN,
1979 /* bounds */ 0,
1980 /* lowaddr */ BUS_SPACE_MAXADDR,
1981 /* highaddr */ BUS_SPACE_MAXADDR,
1982 /* filter */ NULL,
1983 /* filterarg */ NULL,
1984 /* maxsize */ size,
1985 /* nsegments */ 1,
1986 /* maxsegsize */ size,
1987 /* flags */ BUS_DMA_ALLOCNOW,
1988 /* lockfunc */ NULL,
1989 /* lockfuncarg */ NULL,
1990 &dma->dma_tag);
2086 if (r != 0) {
1991 if (r != 0) {
2087 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2088 "error %u\n", r);
1992 device_printf(dev,
1993 "ixgbe_dma_malloc: bus_dma_tag_create failed; error %u\n",
1994 r);
2089 goto fail_0;
2090 }
2091 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1995 goto fail_0;
1996 }
1997 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2092 BUS_DMA_NOWAIT, &dma->dma_map);
1998 BUS_DMA_NOWAIT, &dma->dma_map);
2093 if (r != 0) {
1999 if (r != 0) {
2094 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2095 "error %u\n", r);
2000 device_printf(dev,
2001 "ixgbe_dma_malloc: bus_dmamem_alloc failed; error %u\n", r);
2096 goto fail_1;
2097 }
2002 goto fail_1;
2003 }
2098 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2099 size,
2100 ixgbe_dmamap_cb,
2101 &dma->dma_paddr,
2102 mapflags | BUS_DMA_NOWAIT);
2004 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
2005 ixgbe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2103 if (r != 0) {
2006 if (r != 0) {
2104 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2105 "error %u\n", r);
2007 device_printf(dev,
2008 "ixgbe_dma_malloc: bus_dmamap_load failed; error %u\n", r);
2106 goto fail_2;
2107 }
2108 dma->dma_size = size;
2009 goto fail_2;
2010 }
2011 dma->dma_size = size;
2012
2109 return (0);
2110fail_2:
2111 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2112fail_1:
2113 bus_dma_tag_destroy(dma->dma_tag);
2114fail_0:
2115 dma->dma_tag = NULL;
2013 return (0);
2014fail_2:
2015 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2016fail_1:
2017 bus_dma_tag_destroy(dma->dma_tag);
2018fail_0:
2019 dma->dma_tag = NULL;
2020
2116 return (r);
2021 return (r);
2117}
2022} /* ixgbe_dma_malloc */
2118
2023
2119void
2024/************************************************************************
2025 * ixgbe_dma_free
2026 ************************************************************************/
2027static void
2120ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2121{
2122 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2123 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2124 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2125 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2126 bus_dma_tag_destroy(dma->dma_tag);
2028ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2029{
2030 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2031 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2032 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2033 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2034 bus_dma_tag_destroy(dma->dma_tag);
2127}
2035} /* ixgbe_dma_free */
2128
2129
2036
2037
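Together, ixgbe_dma_malloc() and ixgbe_dma_free() wrap the bus_dma tag-create/alloc/load sequence for one physically contiguous block; ixgbe_allocate_queues() below is their real caller. A minimal sketch of the intended pairing (editorial illustration: the TX-ring sizing is only an example and `adapter` is assumed to be in scope):

/* Editorial sketch, not part of the diff: the allocate/free pairing
 * for a descriptor ring backed by struct ixgbe_dma_alloc. */
struct ixgbe_dma_alloc ring_dma;
int tsize, error;

tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
    DBA_ALIGN);
error = ixgbe_dma_malloc(adapter, tsize, &ring_dma, BUS_DMA_NOWAIT);
if (error == 0) {
	/* ring_dma.dma_vaddr / ring_dma.dma_paddr are now usable ... */
	ixgbe_dma_free(adapter, &ring_dma);	/* unload, free, destroy tag */
}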
2130/*********************************************************************
2038/************************************************************************
2039 * ixgbe_allocate_queues
2131 *
2040 *
2132 * Allocate memory for the transmit and receive rings, and then
2133 * the descriptors associated with each, called only once at attach.
2134 *
2135 **********************************************************************/
2041 * Allocate memory for the transmit and receive rings, and then
2042 * the descriptors associated with each, called only once at attach.
2043 ************************************************************************/
2136int
2137ixgbe_allocate_queues(struct adapter *adapter)
2138{
2044int
2045ixgbe_allocate_queues(struct adapter *adapter)
2046{
2139 device_t dev = adapter->dev;
2140 struct ix_queue *que;
2141 struct tx_ring *txr;
2142 struct rx_ring *rxr;
2143 int rsize, tsize, error = IXGBE_SUCCESS;
2144 int txconf = 0, rxconf = 0;
2145#ifdef PCI_IOV
2146 enum ixgbe_iov_mode iov_mode;
2147#endif
2047 device_t dev = adapter->dev;
2048 struct ix_queue *que;
2049 struct tx_ring *txr;
2050 struct rx_ring *rxr;
2051 int rsize, tsize, error = IXGBE_SUCCESS;
2052 int txconf = 0, rxconf = 0;
2148
2053
2149 /* First allocate the top level queue structs */
2150 if (!(adapter->queues =
2151 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2152 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2153 device_printf(dev, "Unable to allocate queue memory\n");
2154 error = ENOMEM;
2155 goto fail;
2156 }
2054 /* First, allocate the top level queue structs */
2055 adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2056 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2057 if (adapter->queues == NULL) {
2058 device_printf(dev, "Unable to allocate queue memory\n");
2059 error = ENOMEM;
2060 goto fail;
2061 }
2157
2062
2158 /* First allocate the TX ring struct memory */
2159 if (!(adapter->tx_rings =
2160 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2161 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2063 /* Second, allocate the TX ring struct memory */
2064 adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
2065 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2066 if (adapter->tx_rings == NULL) {
2162 device_printf(dev, "Unable to allocate TX ring memory\n");
2163 error = ENOMEM;
2164 goto tx_fail;
2165 }
2166
2067 device_printf(dev, "Unable to allocate TX ring memory\n");
2068 error = ENOMEM;
2069 goto tx_fail;
2070 }
2071
2167 /* Next allocate the RX */
2168 if (!(adapter->rx_rings =
2169 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2170 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2072 /* Third, allocate the RX ring */
2073 adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2074 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2075 if (adapter->rx_rings == NULL) {
2171 device_printf(dev, "Unable to allocate RX ring memory\n");
2172 error = ENOMEM;
2173 goto rx_fail;
2174 }
2175
2176 /* For the ring itself */
2076 device_printf(dev, "Unable to allocate RX ring memory\n");
2077 error = ENOMEM;
2078 goto rx_fail;
2079 }
2080
2081 /* For the ring itself */
2177 tsize = roundup2(adapter->num_tx_desc *
2178 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2082 tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2083 DBA_ALIGN);
2179
2084
2180#ifdef PCI_IOV
2181 iov_mode = ixgbe_get_iov_mode(adapter);
2182 adapter->pool = ixgbe_max_vfs(iov_mode);
2183#else
2184 adapter->pool = 0;
2185#endif
2186 /*
2187 * Now set up the TX queues; txconf is needed to handle the
2188 * possibility that things fail midcourse and we need to
2189 * undo memory gracefully
2085 /*
2086 * Now set up the TX queues; txconf is needed to handle the
2087 * possibility that things fail midcourse and we need to
2088 * undo memory gracefully
2190 */
2089 */
2191 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2192 /* Set up some basics */
2193 txr = &adapter->tx_rings[i];
2194 txr->adapter = adapter;
2090 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2091 /* Set up some basics */
2092 txr = &adapter->tx_rings[i];
2093 txr->adapter = adapter;
2195#ifdef PCI_IOV
2196 txr->me = ixgbe_pf_que_index(iov_mode, i);
2197#else
2198 txr->me = i;
2199#endif
2094 txr->br = NULL;
2095 /* In case SR-IOV is enabled, align the index properly */
2096 txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2097 i);
2200 txr->num_desc = adapter->num_tx_desc;
2201
2202 /* Initialize the TX side lock */
2203 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2204 device_get_nameunit(dev), txr->me);
2205 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2206
2098 txr->num_desc = adapter->num_tx_desc;
2099
2100 /* Initialize the TX side lock */
2101 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2102 device_get_nameunit(dev), txr->me);
2103 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2104
2207 if (ixgbe_dma_malloc(adapter, tsize,
2208 &txr->txdma, BUS_DMA_NOWAIT)) {
2105 if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2106 BUS_DMA_NOWAIT)) {
2209 device_printf(dev,
2210 "Unable to allocate TX Descriptor memory\n");
2211 error = ENOMEM;
2212 goto err_tx_desc;
2213 }
2214 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2215 bzero((void *)txr->tx_base, tsize);
2216
2107 device_printf(dev,
2108 "Unable to allocate TX Descriptor memory\n");
2109 error = ENOMEM;
2110 goto err_tx_desc;
2111 }
2112 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2113 bzero((void *)txr->tx_base, tsize);
2114
2217 /* Now allocate transmit buffers for the ring */
2218 if (ixgbe_allocate_transmit_buffers(txr)) {
2115 /* Now allocate transmit buffers for the ring */
2116 if (ixgbe_allocate_transmit_buffers(txr)) {
2219 device_printf(dev,
2220 "Critical Failure setting up transmit buffers\n");
2221 error = ENOMEM;
2222 goto err_tx_desc;
2117 device_printf(dev,
2118 "Critical Failure setting up transmit buffers\n");
2119 error = ENOMEM;
2120 goto err_tx_desc;
2223 }
2224#ifndef IXGBE_LEGACY_TX
2225 /* Allocate a buf ring */
2226 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2227 M_WAITOK, &txr->tx_mtx);
2228 if (txr->br == NULL) {
2229 device_printf(dev,
2230 "Critical Failure setting up buf ring\n");
2231 error = ENOMEM;
2232 goto err_tx_desc;
2233 }
2234#endif
2121 }
2122 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2123 /* Allocate a buf ring */
2124 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2125 M_WAITOK, &txr->tx_mtx);
2126 if (txr->br == NULL) {
2127 device_printf(dev,
2128 "Critical Failure setting up buf ring\n");
2129 error = ENOMEM;
2130 goto err_tx_desc;
2131 }
2132 }
2235 }
2236
2237 /*
2238 * Next the RX queues...
2133 }
2134
2135 /*
2136 * Next the RX queues...
2239 */
2240 rsize = roundup2(adapter->num_rx_desc *
2241 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2137 */
2138 rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2139 DBA_ALIGN);
2242 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2243 rxr = &adapter->rx_rings[i];
2244 /* Set up some basics */
2245 rxr->adapter = adapter;
2140 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2141 rxr = &adapter->rx_rings[i];
2142 /* Set up some basics */
2143 rxr->adapter = adapter;
2246#ifdef PCI_IOV
2247 rxr->me = ixgbe_pf_que_index(iov_mode, i);
2248#else
2249 rxr->me = i;
2250#endif
2144 /* In case SR-IOV is enabled, align the index properly */
2145 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2146 i);
2251 rxr->num_desc = adapter->num_rx_desc;
2252
2253 /* Initialize the RX side lock */
2254 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2255 device_get_nameunit(dev), rxr->me);
2256 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2257
2147 rxr->num_desc = adapter->num_rx_desc;
2148
2149 /* Initialize the RX side lock */
2150 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2151 device_get_nameunit(dev), rxr->me);
2152 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2153
2258 if (ixgbe_dma_malloc(adapter, rsize,
2259 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2154 if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2155 BUS_DMA_NOWAIT)) {
2260 device_printf(dev,
2261 "Unable to allocate RxDescriptor memory\n");
2262 error = ENOMEM;
2263 goto err_rx_desc;
2264 }
2265 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2266 bzero((void *)rxr->rx_base, rsize);
2267
2156 device_printf(dev,
2157 "Unable to allocate RxDescriptor memory\n");
2158 error = ENOMEM;
2159 goto err_rx_desc;
2160 }
2161 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2162 bzero((void *)rxr->rx_base, rsize);
2163
2268 /* Allocate receive buffers for the ring*/
2164 /* Allocate receive buffers for the ring */
2269 if (ixgbe_allocate_receive_buffers(rxr)) {
2270 device_printf(dev,
2271 "Critical Failure setting up receive buffers\n");
2272 error = ENOMEM;
2273 goto err_rx_desc;
2274 }
2275 }
2276
2277 /*
2165 if (ixgbe_allocate_receive_buffers(rxr)) {
2166 device_printf(dev,
2167 "Critical Failure setting up receive buffers\n");
2168 error = ENOMEM;
2169 goto err_rx_desc;
2170 }
2171 }
2172
2173 /*
2278 ** Finally set up the queue holding structs
2279 */
2174 * Finally set up the queue holding structs
2175 */
2280 for (int i = 0; i < adapter->num_queues; i++) {
2281 que = &adapter->queues[i];
2282 que->adapter = adapter;
2283 que->me = i;
2284 que->txr = &adapter->tx_rings[i];
2285 que->rxr = &adapter->rx_rings[i];
2286 }
2287

--- 7 unchanged lines hidden ---

2295 ixgbe_dma_free(adapter, &txr->txdma);
2296 free(adapter->rx_rings, M_DEVBUF);
2297rx_fail:
2298 free(adapter->tx_rings, M_DEVBUF);
2299tx_fail:
2300 free(adapter->queues, M_DEVBUF);
2301fail:
2302 return (error);
2176 for (int i = 0; i < adapter->num_queues; i++) {
2177 que = &adapter->queues[i];
2178 que->adapter = adapter;
2179 que->me = i;
2180 que->txr = &adapter->tx_rings[i];
2181 que->rxr = &adapter->rx_rings[i];
2182 }
2183

--- 7 unchanged lines hidden ---

2191 ixgbe_dma_free(adapter, &txr->txdma);
2192 free(adapter->rx_rings, M_DEVBUF);
2193rx_fail:
2194 free(adapter->tx_rings, M_DEVBUF);
2195tx_fail:
2196 free(adapter->queues, M_DEVBUF);
2197fail:
2198 return (error);
2303}
2199} /* ixgbe_allocate_queues */
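The txconf/rxconf counters in the function above exist, as its comment notes, so a mid-loop failure can be undone gracefully: count how many ring setups fully completed, then have the error labels (partly elided in this view) release exactly that many. A minimal generic sketch of the idiom, detached from the driver types (all names are hypothetical):

/* Editorial sketch, not part of the diff: the "count what succeeded,
 * unwind exactly that much" idiom behind txconf/rxconf. */
static int
example_setup_all(int n)
{
	int conf = 0, error = 0;

	for (int i = 0; i < n; i++, conf++) {
		if (example_setup_one(i) != 0) {	/* hypothetical setup step */
			error = ENOMEM;
			goto err;
		}
	}
	return (0);
err:
	while (conf-- > 0)
		example_teardown_one(conf);	/* release completed items only */
	return (error);
}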