1/******************************************************************************
2
3 Copyright (c) 2001-2008, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.c 181003 2008-07-30 18:15:18Z jfv $*/
33/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.c 185352 2008-11-26 23:41:18Z jfv $*/
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39/* Undefine this if not using CURRENT */
40#define IXGBE_VLAN_EVENTS
41
39#include "ixgbe.h"
40
41/*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
44int ixgbe_display_debug_stats = 0;
45
46/*********************************************************************
47 * Driver version
48 *********************************************************************/
52char ixgbe_driver_version[] = "1.4.7";
49char ixgbe_driver_version[] = "1.6.2";
50
51/*********************************************************************
52 * PCI Device ID Table
53 *
54 * Used by probe to select devices to load on
55 * Last field stores an index into ixgbe_strings
56 * Last entry must be all 0s
57 *
58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59 *********************************************************************/
60
61static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
62{
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT_DUAL_PORT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
73 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
74 /* required last entry */
75 {0, 0, 0, 0, 0}
76};
77
78/*********************************************************************
79 * Table of branding strings
80 *********************************************************************/
81
82static char *ixgbe_strings[] = {
83 "Intel(R) PRO/10GbE PCI-Express Network Driver"
84};
85
86/*********************************************************************
87 * Function prototypes
88 *********************************************************************/
89static int ixgbe_probe(device_t);
90static int ixgbe_attach(device_t);
91static int ixgbe_detach(device_t);
92static int ixgbe_shutdown(device_t);
93static void ixgbe_start(struct ifnet *);
94static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
95static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
96static void ixgbe_watchdog(struct adapter *);
97static void ixgbe_init(void *);
98static void ixgbe_init_locked(struct adapter *);
99static void ixgbe_stop(void *);
100static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
101static int ixgbe_media_change(struct ifnet *);
102static void ixgbe_identify_hardware(struct adapter *);
103static int ixgbe_allocate_pci_resources(struct adapter *);
104static int ixgbe_allocate_msix(struct adapter *);
105static int ixgbe_allocate_legacy(struct adapter *);
106static int ixgbe_allocate_queues(struct adapter *);
107static int ixgbe_setup_msix(struct adapter *);
108static void ixgbe_free_pci_resources(struct adapter *);
109static void ixgbe_local_timer(void *);
110static int ixgbe_hardware_init(struct adapter *);
111static void ixgbe_setup_interface(device_t, struct adapter *);
112
113static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
114static int ixgbe_setup_transmit_structures(struct adapter *);
115static void ixgbe_setup_transmit_ring(struct tx_ring *);
116static void ixgbe_initialize_transmit_units(struct adapter *);
117static void ixgbe_free_transmit_structures(struct adapter *);
118static void ixgbe_free_transmit_buffers(struct tx_ring *);
119
120static int ixgbe_allocate_receive_buffers(struct rx_ring *);
121static int ixgbe_setup_receive_structures(struct adapter *);
122static int ixgbe_setup_receive_ring(struct rx_ring *);
123static void ixgbe_initialize_receive_units(struct adapter *);
124static void ixgbe_free_receive_structures(struct adapter *);
125static void ixgbe_free_receive_buffers(struct rx_ring *);
126
127static void ixgbe_enable_intr(struct adapter *);
128static void ixgbe_disable_intr(struct adapter *);
129static void ixgbe_update_stats_counters(struct adapter *);
130static bool ixgbe_txeof(struct tx_ring *);
131static bool ixgbe_rxeof(struct rx_ring *, int);
131static void ixgbe_rx_checksum(struct adapter *, u32, struct mbuf *);
132static void ixgbe_rx_checksum(u32, struct mbuf *);
133static void ixgbe_set_promisc(struct adapter *);
134static void ixgbe_disable_promisc(struct adapter *);
135static void ixgbe_set_multi(struct adapter *);
136static void ixgbe_print_hw_stats(struct adapter *);
137static void ixgbe_print_debug_info(struct adapter *);
138static void ixgbe_update_link_status(struct adapter *);
138static int ixgbe_get_buf(struct rx_ring *, int);
139static int ixgbe_get_buf(struct rx_ring *, int, u8);
140static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
141static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
142static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
143static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
144static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
145 struct ixgbe_dma_alloc *, int);
146static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
147static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
148 const char *, int *, int);
149static boolean_t ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
150static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
150static void ixgbe_set_ivar(struct adapter *, u16, u8);
151static void ixgbe_set_ivar(struct adapter *, u16, u8, s8);
152static void ixgbe_configure_ivars(struct adapter *);
153static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
154
154#ifdef IXGBE_VLAN_EVENTS
155#ifdef IXGBE_HW_VLAN_SUPPORT
156static void ixgbe_register_vlan(void *, struct ifnet *, u16);
157static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
158#endif
159
160static void ixgbe_update_aim(struct rx_ring *);
161
162/* Support for pluggable optic modules */
163static bool ixgbe_sfp_probe(struct adapter *);
164
 165/* Legacy (single vector) interrupt handler */
166static void ixgbe_legacy_irq(void *);
167
168/* The MSI/X Interrupt handlers */
169static void ixgbe_msix_tx(void *);
170static void ixgbe_msix_rx(void *);
171static void ixgbe_msix_link(void *);
172
173/* Legacy interrupts use deferred handlers */
174static void ixgbe_handle_tx(void *context, int pending);
175static void ixgbe_handle_rx(void *context, int pending);
176
171#ifndef NO_82598_A0_SUPPORT
172static void desc_flip(void *);
173#endif
177
178/*********************************************************************
179 * FreeBSD Device Interface Entry Points
180 *********************************************************************/
181
182static device_method_t ixgbe_methods[] = {
183 /* Device interface */
184 DEVMETHOD(device_probe, ixgbe_probe),
185 DEVMETHOD(device_attach, ixgbe_attach),
186 DEVMETHOD(device_detach, ixgbe_detach),
187 DEVMETHOD(device_shutdown, ixgbe_shutdown),
188 {0, 0}
189};
190
191static driver_t ixgbe_driver = {
192 "ix", ixgbe_methods, sizeof(struct adapter),
193};
194
195static devclass_t ixgbe_devclass;
196DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
197
198MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
199MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
200
201/*
202** TUNEABLE PARAMETERS:
203*/
204
 205/*
 206** These parameters are used in Adaptive
 207** Interrupt Moderation. The values are written
 208** into EITR and control the interrupt
 209** frequency. They can be modified, but be careful
 210** when tuning them (an example follows below).
 211*/
212static int ixgbe_enable_aim = TRUE;
213TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
214static int ixgbe_low_latency = IXGBE_LOW_LATENCY;
215TUNABLE_INT("hw.ixgbe.low_latency", &ixgbe_low_latency);
 216static int ixgbe_ave_latency = IXGBE_AVE_LATENCY;
 217TUNABLE_INT("hw.ixgbe.ave_latency", &ixgbe_ave_latency);
218static int ixgbe_bulk_latency = IXGBE_BULK_LATENCY;
219TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);
220
221/* How many packets rxeof tries to clean at a time */
222static int ixgbe_rx_process_limit = 100;
223TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
224
225/* Flow control setting, default to full */
207static int ixgbe_flow_control = 3;
226static int ixgbe_flow_control = ixgbe_fc_none;
227TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
228
 229/*
 230 * Should the driver do LRO on the RX end?
 231 * This can be toggled on the fly, but the
 232 * interface must be reset (down/up) for it
 233 * to take effect; see the example below.
 234 */
216static int ixgbe_enable_lro = 0;
235static int ixgbe_enable_lro = 1;
236TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);
237
238/*
239 * MSIX should be the default for best performance,
240 * but this allows it to be forced off for testing.
241 */
242static int ixgbe_enable_msix = 1;
243TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
244
245/*
246 * Enable RX Header Split
247 */
248static int ixgbe_rx_hdr_split = 1;
249TUNABLE_INT("hw.ixgbe.rx_hdr_split", &ixgbe_rx_hdr_split);
250
 251/*
 252 * Number of TX/RX Queues; a setting of 0
 253 * autoconfigures them to the number of CPUs.
 254 */
255static int ixgbe_tx_queues = 1;
256TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
232static int ixgbe_rx_queues = 4;
257static int ixgbe_rx_queues = 1;
258TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
259
260/* Number of TX descriptors per ring */
261static int ixgbe_txd = DEFAULT_TXD;
262TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
263
264/* Number of RX descriptors per ring */
265static int ixgbe_rxd = DEFAULT_RXD;
266TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
267
 268/* Total number of Interfaces - needed for config sanity check */
269static int ixgbe_total_ports;
270
246/* Optics type of this interface */
247static int ixgbe_optics;
248
271/*********************************************************************
272 * Device identification routine
273 *
 274 * ixgbe_probe determines if the driver should be loaded on the
 275 * adapter, based on the PCI vendor/device ID of the adapter.
276 *
277 * return 0 on success, positive on failure
278 *********************************************************************/
279
280static int
281ixgbe_probe(device_t dev)
282{
283 ixgbe_vendor_info_t *ent;
284
263 u_int16_t pci_vendor_id = 0;
264 u_int16_t pci_device_id = 0;
265 u_int16_t pci_subvendor_id = 0;
266 u_int16_t pci_subdevice_id = 0;
267 char adapter_name[128];
285 u16 pci_vendor_id = 0;
286 u16 pci_device_id = 0;
287 u16 pci_subvendor_id = 0;
288 u16 pci_subdevice_id = 0;
289 char adapter_name[256];
290
291 INIT_DEBUGOUT("ixgbe_probe: begin");
292
293 pci_vendor_id = pci_get_vendor(dev);
294 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
295 return (ENXIO);
296
297 pci_device_id = pci_get_device(dev);
298 pci_subvendor_id = pci_get_subvendor(dev);
299 pci_subdevice_id = pci_get_subdevice(dev);
300
301 ent = ixgbe_vendor_info_array;
302 while (ent->vendor_id != 0) {
303 if ((pci_vendor_id == ent->vendor_id) &&
304 (pci_device_id == ent->device_id) &&
305
306 ((pci_subvendor_id == ent->subvendor_id) ||
307 (ent->subvendor_id == 0)) &&
308
309 ((pci_subdevice_id == ent->subdevice_id) ||
310 (ent->subdevice_id == 0))) {
311 sprintf(adapter_name, "%s, Version - %s",
312 ixgbe_strings[ent->index],
313 ixgbe_driver_version);
292 switch (pci_device_id) {
293 case IXGBE_DEV_ID_82598AT_DUAL_PORT :
294 ixgbe_total_ports += 2;
295 break;
296 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
297 ixgbe_optics = IFM_10G_CX4;
298 ixgbe_total_ports += 2;
299 break;
300 case IXGBE_DEV_ID_82598AF_DUAL_PORT :
301 ixgbe_optics = IFM_10G_SR;
302 ixgbe_total_ports += 2;
303 break;
304 case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
305 ixgbe_optics = IFM_10G_SR;
306 ixgbe_total_ports += 1;
307 break;
308 case IXGBE_DEV_ID_82598EB_XF_LR :
309 ixgbe_optics = IFM_10G_LR;
310 ixgbe_total_ports += 1;
311 break;
312 case IXGBE_DEV_ID_82598EB_CX4 :
313 ixgbe_optics = IFM_10G_CX4;
314 ixgbe_total_ports += 1;
315 break;
316 case IXGBE_DEV_ID_82598AT :
317 ixgbe_total_ports += 1;
318 default:
319 break;
320 }
314 device_set_desc_copy(dev, adapter_name);
315 return (0);
316 }
317 ent++;
318 }
326
319 return (ENXIO);
320}
321
322/*********************************************************************
323 * Device initialization routine
324 *
325 * The attach entry point is called when the driver is being loaded.
326 * This routine identifies the type of hardware, allocates all resources
327 * and initializes the hardware.
328 *
329 * return 0 on success, positive on failure
330 *********************************************************************/
331
332static int
333ixgbe_attach(device_t dev)
334{
335 struct adapter *adapter;
336 int error = 0;
345 u32 ctrl_ext;
337 u16 pci_device_id;
338 u32 ctrl_ext;
339
340 INIT_DEBUGOUT("ixgbe_attach: begin");
341
342 /* Allocate, clear, and link in our adapter structure */
343 adapter = device_get_softc(dev);
344 adapter->dev = adapter->osdep.dev = dev;
345
346 /* Core Lock Init*/
347 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
348
349 /* Keep track of number of ports and optics */
350 pci_device_id = pci_get_device(dev);
351 switch (pci_device_id) {
352 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
353 adapter->optics = IFM_10G_CX4;
354 ixgbe_total_ports += 2;
355 break;
356 case IXGBE_DEV_ID_82598AF_DUAL_PORT :
357 adapter->optics = IFM_10G_SR;
358 ixgbe_total_ports += 2;
359 break;
360 case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
361 adapter->optics = IFM_10G_SR;
362 ixgbe_total_ports += 1;
363 break;
364 case IXGBE_DEV_ID_82598EB_XF_LR :
365 adapter->optics = IFM_10G_LR;
366 ixgbe_total_ports += 1;
367 break;
368 case IXGBE_DEV_ID_82598EB_CX4 :
369 adapter->optics = IFM_10G_CX4;
370 ixgbe_total_ports += 1;
371 break;
 372 case IXGBE_DEV_ID_82598AT :
 373 ixgbe_total_ports += 1; break;
 374 case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
 375 ixgbe_total_ports += 2; break;
 376 default:
 377 break;
378 }
379
380 /* SYSCTL APIs */
381 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
382 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
383 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
384 adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");
385
386 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
387 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
388 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
389 adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");
390
391 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
392 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
393 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
394 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
395
396 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
397 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
398 OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
399 &ixgbe_enable_lro, 1, "Large Receive Offload");
400
401 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
402 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
403 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
404 &ixgbe_enable_aim, 1, "Interrupt Moderation");
405
406 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
407 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
408 OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
409 &ixgbe_low_latency, 1, "Low Latency");
410
411 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
412 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
413 OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
414 &ixgbe_ave_latency, 1, "Average Latency");
415
416 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
417 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
418 OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
419 &ixgbe_bulk_latency, 1, "Bulk Latency");
420
421 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
422 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
423 OID_AUTO, "hdr_split", CTLTYPE_INT|CTLFLAG_RW,
424 &ixgbe_rx_hdr_split, 1, "RX Header Split");
425
426 /* Set up the timer callout */
427 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
428
429 /* Determine hardware revision */
430 ixgbe_identify_hardware(adapter);
431
383 /* Indicate to RX setup to use Jumbo Clusters */
384 adapter->bigbufs = TRUE;
385
432 /* Do base PCI setup - map BAR0 */
433 if (ixgbe_allocate_pci_resources(adapter)) {
434 device_printf(dev, "Allocation of PCI resources failed\n");
435 error = ENXIO;
436 goto err_out;
437 }
438
439 /* Do descriptor calc and sanity checks */
440 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
441 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
442 device_printf(dev, "TXD config issue, using default!\n");
443 adapter->num_tx_desc = DEFAULT_TXD;
444 } else
445 adapter->num_tx_desc = ixgbe_txd;
446
447 /*
448 ** With many RX rings it is easy to exceed the
449 ** system mbuf allocation. Tuning nmbclusters
450 ** can alleviate this.
451 */
452 if ((adapter->num_rx_queues > 1) && (nmbclusters > 0 )){
453 int s;
454 /* Calculate the total RX mbuf needs */
455 s = (ixgbe_rxd * adapter->num_rx_queues) * ixgbe_total_ports;
456 if (s > nmbclusters) {
457 device_printf(dev, "RX Descriptors exceed "
458 "system mbuf max, using default instead!\n");
459 ixgbe_rxd = DEFAULT_RXD;
460 }
461 }
462
463 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
 464 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
465 device_printf(dev, "RXD config issue, using default!\n");
466 adapter->num_rx_desc = DEFAULT_RXD;
467 } else
468 adapter->num_rx_desc = ixgbe_rxd;
469
470 /* Allocate our TX/RX Queues */
471 if (ixgbe_allocate_queues(adapter)) {
472 error = ENOMEM;
473 goto err_out;
474 }
475
476 /* Initialize the shared code */
431 if (ixgbe_init_shared_code(&adapter->hw)) {
477 error = ixgbe_init_shared_code(&adapter->hw);
478 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
479 /*
480 ** No optics in this port, set up
481 ** so the timer routine will probe
482 ** for later insertion.
483 */
484 adapter->sfp_probe = TRUE;
485 error = 0;
486 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
487 device_printf(dev,"Unsupported SFP+ module detected!\n");
488 error = EIO;
489 goto err_late;
490 } else if (error) {
491 device_printf(dev,"Unable to initialize the shared code\n");
492 error = EIO;
493 goto err_late;
494 }
495
496 /* Initialize the hardware */
497 if (ixgbe_hardware_init(adapter)) {
498 device_printf(dev,"Unable to initialize the hardware\n");
499 error = EIO;
500 goto err_late;
501 }
502
503 if ((adapter->msix > 1) && (ixgbe_enable_msix))
504 error = ixgbe_allocate_msix(adapter);
505 else
506 error = ixgbe_allocate_legacy(adapter);
507 if (error)
508 goto err_late;
509
510 /* Setup OS specific network interface */
511 ixgbe_setup_interface(dev, adapter);
512
513 /* Sysctl for limiting the amount of work done in the taskqueue */
514 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
515 "max number of rx packets to process", &adapter->rx_process_limit,
516 ixgbe_rx_process_limit);
517
518 /* Initialize statistics */
519 ixgbe_update_stats_counters(adapter);
520
462#ifdef IXGBE_VLAN_EVENTS
521#ifdef IXGBE_HW_VLAN_SUPPORT
522 /* Register for VLAN events */
523 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
524 ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
525 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
526 ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
527#endif
469
528
529 /* let hardware know driver is loaded */
530 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
531 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
533
534 INIT_DEBUGOUT("ixgbe_attach: end");
535 return (0);
536err_late:
537 ixgbe_free_transmit_structures(adapter);
538 ixgbe_free_receive_structures(adapter);
539err_out:
540 ixgbe_free_pci_resources(adapter);
541 return (error);
542
543}
544
545/*********************************************************************
546 * Device removal routine
547 *
548 * The detach entry point is called when the driver is being removed.
549 * This routine stops the adapter and deallocates all the resources
550 * that were allocated for driver operation.
551 *
552 * return 0 on success, positive on failure
553 *********************************************************************/
554
555static int
556ixgbe_detach(device_t dev)
557{
558 struct adapter *adapter = device_get_softc(dev);
559 struct tx_ring *txr = adapter->tx_rings;
560 struct rx_ring *rxr = adapter->rx_rings;
561 u32 ctrl_ext;
562
563 INIT_DEBUGOUT("ixgbe_detach: begin");
564
565 /* Make sure VLANS are not using driver */
566#if __FreeBSD_version >= 700000
567 if (adapter->ifp->if_vlantrunk != NULL) {
568#else
569 if (adapter->ifp->if_nvlans != 0) {
570#endif
571 device_printf(dev,"Vlan in use, detach first\n");
572 return (EBUSY);
573 }
574
575 IXGBE_CORE_LOCK(adapter);
576 ixgbe_stop(adapter);
577 IXGBE_CORE_UNLOCK(adapter);
578
579 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
580 if (txr->tq) {
581 taskqueue_drain(txr->tq, &txr->tx_task);
582 taskqueue_free(txr->tq);
524 txr->tq = NULL;
583 }
584 }
585
586 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
587 if (rxr->tq) {
588 taskqueue_drain(rxr->tq, &rxr->rx_task);
589 taskqueue_free(rxr->tq);
532 rxr->tq = NULL;
590 }
591 }
592
536#ifdef IXGBE_VLAN_EVENTS
593 /* let hardware know driver is unloading */
594 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
595 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
597
598#ifdef IXGBE_HW_VLAN_SUPPORT
599 /* Unregister VLAN events */
600 if (adapter->vlan_attach != NULL)
601 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
602 if (adapter->vlan_detach != NULL)
603 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
542#endif
604#endif
605
544 /* let hardware know driver is unloading */
545 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
546 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
547 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
548
606 ether_ifdetach(adapter->ifp);
607 callout_drain(&adapter->timer);
608 ixgbe_free_pci_resources(adapter);
609 bus_generic_detach(dev);
610 if_free(adapter->ifp);
611
612 ixgbe_free_transmit_structures(adapter);
613 ixgbe_free_receive_structures(adapter);
614
615 IXGBE_CORE_LOCK_DESTROY(adapter);
616 return (0);
617}
618
619/*********************************************************************
620 *
621 * Shutdown entry point
622 *
623 **********************************************************************/
624
625static int
626ixgbe_shutdown(device_t dev)
627{
628 struct adapter *adapter = device_get_softc(dev);
629 IXGBE_CORE_LOCK(adapter);
630 ixgbe_stop(adapter);
631 IXGBE_CORE_UNLOCK(adapter);
632 return (0);
633}
634
635
636/*********************************************************************
637 * Transmit entry point
638 *
639 * ixgbe_start is called by the stack to initiate a transmit.
640 * The driver will remain in this routine as long as there are
641 * packets to transmit and transmit resources are available.
 642 * In case resources are not available, the stack is notified and
 643 * the packet is requeued.
644 **********************************************************************/
645
646static void
647ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
648{
649 struct mbuf *m_head;
650 struct adapter *adapter = txr->adapter;
651
652 IXGBE_TX_LOCK_ASSERT(txr);
653
654 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
655 IFF_DRV_RUNNING)
656 return;
657 if (!adapter->link_active)
658 return;
659
660 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
661
662 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
663 if (m_head == NULL)
664 break;
665
666 if (ixgbe_xmit(txr, &m_head)) {
667 if (m_head == NULL)
668 break;
669 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
670 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
671 break;
672 }
673 /* Send a copy of the frame to the BPF listener */
674 ETHER_BPF_MTAP(ifp, m_head);
675
676 /* Set timeout in case hardware has problems transmitting */
677 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
678
679 }
680 return;
681}
682
683
684static void
685ixgbe_start(struct ifnet *ifp)
686{
687 struct adapter *adapter = ifp->if_softc;
688 struct tx_ring *txr = adapter->tx_rings;
689 u32 queue = 0;
690
691 /*
692 ** This is really just here for testing
693 ** TX multiqueue, ultimately what is
694 ** needed is the flow support in the stack
695 ** and appropriate logic here to deal with
696 ** it. -jfv
697 */
698 if (adapter->num_tx_queues > 1)
699 queue = (curcpu % adapter->num_tx_queues);
700
701 txr = &adapter->tx_rings[queue];
702
703 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
704 IXGBE_TX_LOCK(txr);
705 ixgbe_start_locked(txr, ifp);
706 IXGBE_TX_UNLOCK(txr);
707 }
708 return;
709}
710
711/*********************************************************************
712 * Ioctl entry point
713 *
714 * ixgbe_ioctl is called when the user wants to configure the
715 * interface.
716 *
717 * return 0 on success, positive on failure
718 **********************************************************************/
719
720static int
721ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
722{
723 int error = 0;
724 struct ifreq *ifr = (struct ifreq *) data;
725 struct ifaddr *ifa = (struct ifaddr *) data;
726 struct adapter *adapter = ifp->if_softc;
727
728 switch (command) {
729 case SIOCSIFADDR:
730 IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
731 if (ifa->ifa_addr->sa_family == AF_INET) {
732 ifp->if_flags |= IFF_UP;
733 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
734 IXGBE_CORE_LOCK(adapter);
735 ixgbe_init_locked(adapter);
736 IXGBE_CORE_UNLOCK(adapter);
737 }
738 arp_ifinit(ifp, ifa);
739 } else
740 ether_ioctl(ifp, command, data);
741 break;
742 case SIOCSIFMTU:
743 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
744 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
745 error = EINVAL;
746 } else {
747 IXGBE_CORE_LOCK(adapter);
748 ifp->if_mtu = ifr->ifr_mtu;
749 adapter->max_frame_size =
750 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
751 ixgbe_init_locked(adapter);
752 IXGBE_CORE_UNLOCK(adapter);
753 }
754 break;
755 case SIOCSIFFLAGS:
756 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
757 IXGBE_CORE_LOCK(adapter);
758 if (ifp->if_flags & IFF_UP) {
759 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
760 if ((ifp->if_flags ^ adapter->if_flags) &
761 (IFF_PROMISC | IFF_ALLMULTI)) {
762 ixgbe_disable_promisc(adapter);
763 ixgbe_set_promisc(adapter);
764 }
765 } else
766 ixgbe_init_locked(adapter);
767 } else
768 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
769 ixgbe_stop(adapter);
770 adapter->if_flags = ifp->if_flags;
771 IXGBE_CORE_UNLOCK(adapter);
772 break;
773 case SIOCADDMULTI:
774 case SIOCDELMULTI:
775 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
776 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
777 IXGBE_CORE_LOCK(adapter);
778 ixgbe_disable_intr(adapter);
779 ixgbe_set_multi(adapter);
780 ixgbe_enable_intr(adapter);
781 IXGBE_CORE_UNLOCK(adapter);
782 }
783 break;
784 case SIOCSIFMEDIA:
785 case SIOCGIFMEDIA:
786 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
787 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
788 break;
789 case SIOCSIFCAP:
790 {
791 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
792 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
793 if (mask & IFCAP_HWCSUM)
794 ifp->if_capenable ^= IFCAP_HWCSUM;
795 if (mask & IFCAP_TSO4)
796 ifp->if_capenable ^= IFCAP_TSO4;
797 if (mask & IFCAP_VLAN_HWTAGGING)
798 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
799 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
800 ixgbe_init(adapter);
801#if __FreeBSD_version >= 700000
802 VLAN_CAPABILITIES(ifp);
803#endif
804 break;
805 }
806 default:
807 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
808 error = ether_ioctl(ifp, command, data);
809 break;
810 }
811
812 return (error);
813}
814
815/*********************************************************************
816 * Watchdog entry point
817 *
818 * This routine is called by the local timer
 819 * to detect hardware hangs.
820 *
821 **********************************************************************/
822
823static void
824ixgbe_watchdog(struct adapter *adapter)
825{
826 device_t dev = adapter->dev;
827 struct tx_ring *txr = adapter->tx_rings;
828 struct ixgbe_hw *hw = &adapter->hw;
829 bool tx_hang = FALSE;
830
831 IXGBE_CORE_LOCK_ASSERT(adapter);
832
833 /*
834 * The timer is set to 5 every time ixgbe_start() queues a packet.
835 * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
836 * least one descriptor.
837 * Finally, anytime all descriptors are clean the timer is
838 * set to 0.
839 */
840 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
841 u32 head, tail;
842
843 IXGBE_TX_LOCK(txr);
844 if (txr->watchdog_timer == 0 || --txr->watchdog_timer) {
845 IXGBE_TX_UNLOCK(txr);
846 continue;
847 } else {
848 head = IXGBE_READ_REG(hw, IXGBE_TDH(i));
849 tail = IXGBE_READ_REG(hw, IXGBE_TDT(i));
850 if (head == tail) { /* last minute check */
851 IXGBE_TX_UNLOCK(txr);
852 continue;
853 }
854 /* Well, seems something is really hung */
855 tx_hang = TRUE;
856 IXGBE_TX_UNLOCK(txr);
857 break;
858 }
859 }
860 if (tx_hang == FALSE)
861 return;
862
863 /*
864 * If we are in this routine because of pause frames, then don't
865 * reset the hardware.
866 */
867 if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
868 txr = adapter->tx_rings; /* reset pointer */
869 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
870 IXGBE_TX_LOCK(txr);
871 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
872 IXGBE_TX_UNLOCK(txr);
873 }
874 return;
875 }
876
877
878 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
879 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
880 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
881 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
882 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
883 device_printf(dev,"TX(%d) desc avail = %d,"
884 "Next TX to Clean = %d\n",
885 i, txr->tx_avail, txr->next_tx_to_clean);
886 }
887 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
888 adapter->watchdog_events++;
889
890 ixgbe_init_locked(adapter);
891}
892
893/*********************************************************************
894 * Init entry point
895 *
 896 * This routine is used in two ways. It is used by the stack as the
 897 * init entry point in the network interface structure. It is also used
898 * by the driver as a hw/sw initialization routine to get to a
899 * consistent state.
900 *
901 * return 0 on success, positive on failure
902 **********************************************************************/
903#define IXGBE_MHADD_MFS_SHIFT 16
904
905static void
906ixgbe_init_locked(struct adapter *adapter)
907{
908 struct rx_ring *rxr = adapter->rx_rings;
909 struct tx_ring *txr = adapter->tx_rings;
910 struct ifnet *ifp = adapter->ifp;
911 device_t dev = adapter->dev;
912 struct ixgbe_hw *hw;
854 u32 txdctl, rxdctl, mhadd, gpie;
913 u32 k, txdctl, mhadd, gpie;
914 u32 rxdctl, rxctrl;
915
916 INIT_DEBUGOUT("ixgbe_init: begin");
917
918 hw = &adapter->hw;
919 mtx_assert(&adapter->core_mtx, MA_OWNED);
920
921 ixgbe_stop(adapter);
922
923 /* Get the latest mac address, User can use a LAA */
924 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
925 IXGBE_ETH_LENGTH_OF_ADDRESS);
926 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
927 adapter->hw.addr_ctrl.rar_used_count = 1;
928
929 /* Initialize the hardware */
930 if (ixgbe_hardware_init(adapter)) {
931 device_printf(dev, "Unable to initialize the hardware\n");
932 return;
933 }
934
875#ifndef IXGBE_VLAN_EVENTS
876 /* With events this is done when a vlan registers */
935#ifndef IXGBE_HW_VLAN_SUPPORT
936 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
878 u32 ctrl;
937 u32 ctrl;
938
939 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
940 ctrl |= IXGBE_VLNCTRL_VME;
941 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
942 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
943 }
944#endif
885
945 /* Prepare transmit descriptors and buffers */
946 if (ixgbe_setup_transmit_structures(adapter)) {
947 device_printf(dev,"Could not setup transmit structures\n");
948 ixgbe_stop(adapter);
949 return;
950 }
951
952 ixgbe_initialize_transmit_units(adapter);
953
954 /* TX irq moderation rate is fixed */
955 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
956 IXGBE_WRITE_REG(&adapter->hw,
957 IXGBE_EITR(txr->msix), ixgbe_ave_latency);
958 txr->watchdog_timer = FALSE;
959 }
960
961 /* Setup Multicast table */
962 ixgbe_set_multi(adapter);
963
964 /*
899 ** If we are resetting MTU smaller than 2K
900 ** drop to small RX buffers
965 ** Determine the correct mbuf pool
966 ** for doing jumbo/headersplit
967 */
902 if (adapter->max_frame_size <= MCLBYTES)
903 adapter->bigbufs = FALSE;
968 if (ifp->if_mtu > ETHERMTU)
969 adapter->rx_mbuf_sz = MJUMPAGESIZE;
970 else
971 adapter->rx_mbuf_sz = MCLBYTES;
972
973 /* Prepare receive descriptors and buffers */
974 if (ixgbe_setup_receive_structures(adapter)) {
975 device_printf(dev,"Could not setup receive structures\n");
976 ixgbe_stop(adapter);
977 return;
978 }
979
980 /* Configure RX settings */
981 ixgbe_initialize_receive_units(adapter);
982
983 /* RX moderation will be adapted over time, set default */
984 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
985 IXGBE_WRITE_REG(&adapter->hw,
986 IXGBE_EITR(rxr->msix), ixgbe_low_latency);
987 }
988
989 /* Set Link moderation */
990 IXGBE_WRITE_REG(&adapter->hw,
991 IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
992
993 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
994
995 /* Enable Fan Failure Interrupt */
996 if (adapter->hw.phy.media_type == ixgbe_media_type_copper)
997 gpie |= IXGBE_SDP1_GPIEN;
998
999 if (adapter->msix) {
1000 /* Enable Enhanced MSIX mode */
1001 gpie |= IXGBE_GPIE_MSIX_MODE;
1002 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1003 IXGBE_GPIE_OCD;
1004 }
1005 IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
1006
1007 /* Set the various hardware offload abilities */
1008 ifp->if_hwassist = 0;
1009 if (ifp->if_capenable & IFCAP_TSO4)
1010 ifp->if_hwassist |= CSUM_TSO;
1011 else if (ifp->if_capenable & IFCAP_TXCSUM)
1012 ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
1013
1014 /* Set MTU size */
1015 if (ifp->if_mtu > ETHERMTU) {
1016 mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
1017 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1018 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1019 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
1020 }
1021
1022 /* Now enable all the queues */
1023
1024 for (int i = 0; i < adapter->num_tx_queues; i++) {
1025 txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
1026 txdctl |= IXGBE_TXDCTL_ENABLE;
1027 /* Set WTHRESH to 8, burst writeback */
1028 txdctl |= (8 << 16);
1029 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
1030 }
1031
1032 for (int i = 0; i < adapter->num_rx_queues; i++) {
1033 rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
1034 /* PTHRESH set to 32 */
1035 rxdctl |= 0x0020;
1036 rxdctl |= IXGBE_RXDCTL_ENABLE;
1037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
1038 for (k = 0; k < 10; k++) {
1039 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1040 IXGBE_RXDCTL_ENABLE)
1041 break;
1042 else
1043 msec_delay(1);
1044 }
1045 wmb();
1046 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1047 }
1048
1049 /* Enable Receive engine */
1050 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1051 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1052 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1053 rxctrl |= IXGBE_RXCTRL_RXEN;
1054 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
1055
1056 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1057
1058 /* Set up MSI/X routing */
963 ixgbe_configure_ivars(adapter);
1059 if (ixgbe_enable_msix)
1060 ixgbe_configure_ivars(adapter);
1061
1062 ixgbe_enable_intr(adapter);
1063
1064 /* Now inform the stack we're ready */
1065 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1066 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1067
1068 return;
1069}
1070
1071static void
1072ixgbe_init(void *arg)
1073{
1074 struct adapter *adapter = arg;
1075
1076 IXGBE_CORE_LOCK(adapter);
1077 ixgbe_init_locked(adapter);
1078 IXGBE_CORE_UNLOCK(adapter);
1079 return;
1080}
1081
1082
1083/*
987** Legacy Deferred Interrupt Handlers
1084** MSIX Interrupt Handlers
1085*/
1086
1087static void
1088ixgbe_handle_rx(void *context, int pending)
1089{
1090 struct rx_ring *rxr = context;
1091 struct adapter *adapter = rxr->adapter;
995 u32 loop = 0;
1092 u32 loop = MAX_LOOP;
1093 bool more;
1094
997 while (loop++ < MAX_INTR)
998 if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
999 break;
1095 do {
1096 more = ixgbe_rxeof(rxr, -1);
1097 } while (loop-- && more);
1098 /* Reenable this interrupt */
1099 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
1100}
1101
1102static void
1103ixgbe_handle_tx(void *context, int pending)
1104{
1105 struct tx_ring *txr = context;
1106 struct adapter *adapter = txr->adapter;
1107 struct ifnet *ifp = adapter->ifp;
1008 u32 loop = 0;
1108 u32 loop = MAX_LOOP;
1109 bool more;
1110
1010 IXGBE_TX_LOCK(txr);
1011 while (loop++ < MAX_INTR)
1012 if (ixgbe_txeof(txr) == 0)
1013 break;
1014 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1015 ixgbe_start_locked(txr, ifp);
1016 IXGBE_TX_UNLOCK(txr);
1111 IXGBE_TX_LOCK(txr);
1112 do {
1113 more = ixgbe_txeof(txr);
1114 } while (loop-- && more);
1115
1116 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1117 ixgbe_start_locked(txr, ifp);
1118
1119 IXGBE_TX_UNLOCK(txr);
1120
1121 /* Reenable this interrupt */
1122 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1123}
1124
1125
1126/*********************************************************************
1127 *
1128 * Legacy Interrupt Service routine
1129 *
1130 **********************************************************************/
1131
1132static void
1133ixgbe_legacy_irq(void *arg)
1134{
1029 u32 reg_eicr;
1135 struct adapter *adapter = arg;
1136 struct ixgbe_hw *hw = &adapter->hw;
1137 struct tx_ring *txr = adapter->tx_rings;
1138 struct rx_ring *rxr = adapter->rx_rings;
1033 struct ixgbe_hw *hw;
1139 u32 reg_eicr;
1140
1035 hw = &adapter->hw;
1036 reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1037 if (reg_eicr == 0)
1141
1142 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1143
1144 if (reg_eicr == 0) {
1145 ixgbe_enable_intr(adapter);
1146 return;
1147 }
1148
1040 if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
1149 if (ixgbe_rxeof(rxr, adapter->rx_process_limit))
1150 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1042 if (ixgbe_txeof(txr) != 0)
1043 taskqueue_enqueue(txr->tq, &txr->tx_task);
1151 if (ixgbe_txeof(txr))
1152 taskqueue_enqueue(txr->tq, &txr->tx_task);
1153
1154 /* Check for fan failure */
1155 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1156 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1157 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1158 "REPLACE IMMEDIATELY!!\n");
1050 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
1051 IXGBE_EICR_GPI_SDP1);
1159 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1160 }
1161
1162 /* Link status change */
1163 if (reg_eicr & IXGBE_EICR_LSC)
1164 ixgbe_update_link_status(adapter);
1165
1166 ixgbe_enable_intr(adapter);
1167 return;
1168}
1169
1170
1171/*********************************************************************
1172 *
1173 * MSI TX Interrupt Service routine
1174 *
1175 **********************************************************************/
1176
1177void
1178ixgbe_msix_tx(void *arg)
1179{
1070 struct tx_ring *txr = arg;
1071 struct adapter *adapter = txr->adapter;
1072 u32 loop = 0;
1180 struct tx_ring *txr = arg;
1181 struct adapter *adapter = txr->adapter;
1182 bool more;
1183
1074 ++txr->tx_irq;
1184 IXGBE_TX_LOCK(txr);
1076 while (loop++ < MAX_INTR)
1077 if (ixgbe_txeof(txr) == 0)
1078 break;
1185 ++txr->tx_irq;
1186 more = ixgbe_txeof(txr);
1187 IXGBE_TX_UNLOCK(txr);
1080 /* Reenable this interrupt */
1081 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1082
1188 if (more)
1189 taskqueue_enqueue(txr->tq, &txr->tx_task);
1190 else /* Reenable this interrupt */
1191 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1192 return;
1193}
1194
1195
1196/*********************************************************************
1197 *
1088 * MSI RX Interrupt Service routine
1198 * MSIX RX Interrupt Service routine
1199 *
1200 **********************************************************************/
1201
1202static void
1203ixgbe_msix_rx(void *arg)
1204{
1205 struct rx_ring *rxr = arg;
1096 struct adapter *adapter = rxr->adapter;
1097 u32 loop = 0;
1206 struct adapter *adapter = rxr->adapter;
1207 bool more;
1208
1209 ++rxr->rx_irq;
1100 while (loop++ < MAX_INTR)
1101 if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
1102 break;
1103 /* Reenable this interrupt */
1104 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
1210 more = ixgbe_rxeof(rxr, -1);
1211 if (more)
1212 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1213 else
1214 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
1215 /* Update interrupt rate */
1216 if (ixgbe_enable_aim == TRUE)
1217 ixgbe_update_aim(rxr);
1218 return;
1219}
1220
1221/*
1222** Routine to adjust the RX EITR value based on traffic;
1223** it's a simple three-state model, but it seems to help.
1224**
1225** Note that the three EITR values are tuneable using
1226** sysctl in real time; a usage sketch follows this routine.
1227** The feature can be effectively nullified by setting them equal.
1228*/
1229#define BULK_THRESHOLD 10000
1230#define AVE_THRESHOLD 1600
1231
1232static void
1233ixgbe_update_aim(struct rx_ring *rxr)
1234{
1235 struct adapter *adapter = rxr->adapter;
1236 u32 olditr, newitr;
1237
1238 /* Update interrupt moderation based on traffic */
1239 olditr = rxr->eitr_setting;
1240 newitr = olditr;
1241
1242 /* Idle, don't change setting */
1243 if (rxr->bytes == 0)
1244 return;
1245
1246 if (olditr == ixgbe_low_latency) {
1247 if (rxr->bytes > AVE_THRESHOLD)
1248 newitr = ixgbe_ave_latency;
1249 } else if (olditr == ixgbe_ave_latency) {
1250 if (rxr->bytes < AVE_THRESHOLD)
1251 newitr = ixgbe_low_latency;
1252 else if (rxr->bytes > BULK_THRESHOLD)
1253 newitr = ixgbe_bulk_latency;
1254 } else if (olditr == ixgbe_bulk_latency) {
1255 if (rxr->bytes < BULK_THRESHOLD)
1256 newitr = ixgbe_ave_latency;
1257 }
1258
1259 if (olditr != newitr) {
1260 /* Change interrupt rate */
1261 rxr->eitr_setting = newitr;
1262 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rxr->me),
1263 newitr | (newitr << 16));
1264 }
1265
1266 rxr->bytes = 0;
1267 return;
1268}
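/*
** Usage sketch (editorial, assuming unit 0): the three EITR values
** used above are exported by ixgbe_attach() as per-device sysctls,
** so they can be changed while the driver is running:
**
**	# sysctl dev.ix.0.low_latency=128
**	# sysctl dev.ix.0.ave_latency=400
**	# sysctl dev.ix.0.bulk_latency=1200
**
** Setting all three to the same value effectively disables the
** adaptive behavior, as noted in the comment above the routine.
*/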
1269
1270
1271static void
1272ixgbe_msix_link(void *arg)
1273{
1274 struct adapter *adapter = arg;
1275 struct ixgbe_hw *hw = &adapter->hw;
1276 u32 reg_eicr;
1277
1278 ++adapter->link_irq;
1279
1280 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1281
1282 if (reg_eicr & IXGBE_EICR_LSC)
1283 ixgbe_update_link_status(adapter);
1284
1285 /* Check for fan failure */
1286 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1287 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1288 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1289 "REPLACE IMMEDIATELY!!\n");
1290 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1291 }
1292
1293 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1294 return;
1295}
1296
1297
1298/*********************************************************************
1299 *
1300 * Media Ioctl callback
1301 *
1302 * This routine is called whenever the user queries the status of
1303 * the interface using ifconfig.
1304 *
1305 **********************************************************************/
1306static void
1307ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1308{
1309 struct adapter *adapter = ifp->if_softc;
1310
1311 INIT_DEBUGOUT("ixgbe_media_status: begin");
1312 IXGBE_CORE_LOCK(adapter);
1313 ixgbe_update_link_status(adapter);
1314
1315 ifmr->ifm_status = IFM_AVALID;
1316 ifmr->ifm_active = IFM_ETHER;
1317
1318 if (!adapter->link_active) {
1319 IXGBE_CORE_UNLOCK(adapter);
1320 return;
1321 }
1322
1323 ifmr->ifm_status |= IFM_ACTIVE;
1324
1325 switch (adapter->link_speed) {
1326 case IXGBE_LINK_SPEED_1GB_FULL:
1327 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1328 break;
1329 case IXGBE_LINK_SPEED_10GB_FULL:
1167 ifmr->ifm_active |= ixgbe_optics | IFM_FDX;
1330 ifmr->ifm_active |= adapter->optics | IFM_FDX;
1331 break;
1332 }
1333
1334 IXGBE_CORE_UNLOCK(adapter);
1335
1336 return;
1337}
1338
1339/*********************************************************************
1340 *
1341 * Media Ioctl callback
1342 *
1343 * This routine is called when the user changes speed/duplex using
1344 * the media/mediaopt options with ifconfig.
1345 *
1346 **********************************************************************/
1347static int
1348ixgbe_media_change(struct ifnet * ifp)
1349{
1350 struct adapter *adapter = ifp->if_softc;
1351 struct ifmedia *ifm = &adapter->media;
1352
1353 INIT_DEBUGOUT("ixgbe_media_change: begin");
1354
1355 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1356 return (EINVAL);
1357
1358 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1359 case IFM_AUTO:
1360 adapter->hw.mac.autoneg = TRUE;
1361 adapter->hw.phy.autoneg_advertised =
1362 IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1363 break;
1364 default:
1365 device_printf(adapter->dev, "Only auto media type\n");
1366 return (EINVAL);
1367 }
1368
1369 return (0);
1370}
1371
1372/*********************************************************************
1373 *
1374 * This routine maps the mbufs to tx descriptors.
1375 * WARNING: while this code is using an MQ style infrastructure,
1376 * it would NOT work as is with more than 1 queue.
1377 *
1378 * return 0 on success, positive on failure
1379 **********************************************************************/
1380
1381static int
1382ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1383{
1384 struct adapter *adapter = txr->adapter;
1385 u32 olinfo_status = 0, cmd_type_len = 0;
1223 u32 paylen;
1386 u32 paylen = 0;
1387 int i, j, error, nsegs;
1388 int first, last = 0;
1389 struct mbuf *m_head;
1390 bus_dma_segment_t segs[IXGBE_MAX_SCATTER];
1391 bus_dmamap_t map;
1392 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1393 union ixgbe_adv_tx_desc *txd = NULL;
1394
1395 m_head = *m_headp;
1233 paylen = 0;
1396
1397 /* Basic descriptor defines */
1398 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
1399 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
1400
1401 if (m_head->m_flags & M_VLANTAG)
1402 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1403
1404 /*
1405 * Force a cleanup if number of TX descriptors
1406 * available is below the threshold. If it fails
1407 * to get above, then abort transmit.
1408 */
1409 if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
1410 ixgbe_txeof(txr);
1411 /* Make sure things have improved */
1412 if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
1413 txr->no_tx_desc_avail++;
1414 return (ENOBUFS);
1415 }
1416 }
1417
1418 /*
1419 * Important to capture the first descriptor
1420 * used because it will contain the index of
1421 * the one we tell the hardware to report back
1422 */
1423 first = txr->next_avail_tx_desc;
1424 txbuf = &txr->tx_buffers[first];
1425 txbuf_mapped = txbuf;
1426 map = txbuf->map;
1427
1428 /*
1429 * Map the packet for DMA.
1430 */
1431 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1432 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1433
1434 if (error == EFBIG) {
1435 struct mbuf *m;
1436
1437 m = m_defrag(*m_headp, M_DONTWAIT);
1438 if (m == NULL) {
1277 adapter->mbuf_alloc_failed++;
1439 adapter->mbuf_defrag_failed++;
1440 m_freem(*m_headp);
1441 *m_headp = NULL;
1442 return (ENOBUFS);
1443 }
1444 *m_headp = m;
1445
1446 /* Try it again */
1447 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1448 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1449
1450 if (error == ENOMEM) {
1451 adapter->no_tx_dma_setup++;
1452 return (error);
1453 } else if (error != 0) {
1454 adapter->no_tx_dma_setup++;
1455 m_freem(*m_headp);
1456 *m_headp = NULL;
1457 return (error);
1458 }
1459 } else if (error == ENOMEM) {
1460 adapter->no_tx_dma_setup++;
1461 return (error);
1462 } else if (error != 0) {
1463 adapter->no_tx_dma_setup++;
1464 m_freem(*m_headp);
1465 *m_headp = NULL;
1466 return (error);
1467 }
1468
1469 /* Make certain there are enough descriptors */
1470 if (nsegs > txr->tx_avail - 2) {
1471 txr->no_tx_desc_avail++;
1472 error = ENOBUFS;
1473 goto xmit_fail;
1474 }
1475 m_head = *m_headp;
1476
1477 /*
1478 ** Set the appropriate offload context
1479 ** this becomes the first descriptor of
1480 ** a packet.
1481 */
1482 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1483 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1484 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1485 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1486 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1487 ++adapter->tso_tx;
1488 } else if (ixgbe_tx_ctx_setup(txr, m_head))
1489 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1490
1491 /* Record payload length */
1492 if (paylen == 0)
1493 olinfo_status |= m_head->m_pkthdr.len <<
1494 IXGBE_ADVTXD_PAYLEN_SHIFT;
1495
1496 i = txr->next_avail_tx_desc;
1497 for (j = 0; j < nsegs; j++) {
1498 bus_size_t seglen;
1499 bus_addr_t segaddr;
1500
1501 txbuf = &txr->tx_buffers[i];
1502 txd = &txr->tx_base[i];
1503 seglen = segs[j].ds_len;
1504 segaddr = htole64(segs[j].ds_addr);
1505
1506 txd->read.buffer_addr = segaddr;
1507 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1508 cmd_type_len |seglen);
1509 txd->read.olinfo_status = htole32(olinfo_status);
1510 last = i; /* Next descriptor that will get completed */
1511
1512 if (++i == adapter->num_tx_desc)
1513 i = 0;
1514
1515 txbuf->m_head = NULL;
1349 /*
1350 ** we have to do this inside the loop right now
1351 ** because of the hardware workaround.
1352 */
1353 if (j == (nsegs -1)) /* Last descriptor gets EOP and RS */
1354 txd->read.cmd_type_len |=
1355 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1356#ifndef NO_82598_A0_SUPPORT
1357 if (adapter->hw.revision_id == 0)
1358 desc_flip(txd);
1359#endif
1516 }
1517
1518 txd->read.cmd_type_len |=
1519 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1520 txr->tx_avail -= nsegs;
1521 txr->next_avail_tx_desc = i;
1522
1523 txbuf->m_head = m_head;
1524 txbuf->map = map;
1525 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1526
1527 /* Set the index of the descriptor that will be marked done */
1528 txbuf = &txr->tx_buffers[first];
1529
1530 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1531 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1532 /*
1533 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1534 * hardware that this frame is available to transmit.
1535 */
1536 ++txr->total_packets;
1537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1379 ++txr->tx_packets;
1538 return (0);
1539
1540xmit_fail:
1541 bus_dmamap_unload(txr->txtag, txbuf->map);
1542 return (error);
1543
1544}
1545
1546static void
1547ixgbe_set_promisc(struct adapter *adapter)
1548{
1549
1550 u_int32_t reg_rctl;
1551 struct ifnet *ifp = adapter->ifp;
1552
1553 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1554
1555 if (ifp->if_flags & IFF_PROMISC) {
1556 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1558 } else if (ifp->if_flags & IFF_ALLMULTI) {
1559 reg_rctl |= IXGBE_FCTRL_MPE;
1560 reg_rctl &= ~IXGBE_FCTRL_UPE;
1561 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1562 }
1563 return;
1564}
1565
1566static void
1567ixgbe_disable_promisc(struct adapter * adapter)
1568{
1569 u_int32_t reg_rctl;
1570
1571 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1572
1573 reg_rctl &= (~IXGBE_FCTRL_UPE);
1574 reg_rctl &= (~IXGBE_FCTRL_MPE);
1575 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1576
1577 return;
1578}
1579
1580
1581/*********************************************************************
1582 * Multicast Update
1583 *
1584 * This routine is called whenever the multicast address list is updated.
1585 *
1586 **********************************************************************/
1587#define IXGBE_RAR_ENTRIES 16
1588
1589static void
1590ixgbe_set_multi(struct adapter *adapter)
1591{
1592 u32 fctrl;
1593 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1594 u8 *update_ptr;
1595 struct ifmultiaddr *ifma;
1596 int mcnt = 0;
1597 struct ifnet *ifp = adapter->ifp;
1598
1599 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1600
1601 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1602 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1603 if (ifp->if_flags & IFF_PROMISC)
1604 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1605 else if (ifp->if_flags & IFF_ALLMULTI) {
1606 fctrl |= IXGBE_FCTRL_MPE;
1607 fctrl &= ~IXGBE_FCTRL_UPE;
1608 } else
1609 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1610
1611 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1612
1613 IF_ADDR_LOCK(ifp);
1614 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1615 if (ifma->ifma_addr->sa_family != AF_LINK)
1616 continue;
1617 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1618 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1619 IXGBE_ETH_LENGTH_OF_ADDRESS);
1620 mcnt++;
1621 }
1622 IF_ADDR_UNLOCK(ifp);
1623
1624 update_ptr = mta;
1625 ixgbe_update_mc_addr_list(&adapter->hw,
1626 update_ptr, mcnt, ixgbe_mc_array_itr);
1627
1628 return;
1629}
1630
1631/*
1632 * This is an iterator function needed by the multicast shared
1633 * code. It feeds the shared code routine the addresses in the
1634 * array built by ixgbe_set_multi() one by one (see sketch below).
1635 */
1636static u8 *
1637ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1638{
1639 u8 *addr = *update_ptr;
1640 u8 *newptr;
1641 *vmdq = 0;
1642
1643 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1644 *update_ptr = newptr;
1645 return addr;
1646}
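/*
** Consumption sketch (editorial; the real loop lives in the shared
** code's ixgbe_update_mc_addr_list()). The iterator is called once
** per address and walks the flat mta[] array built in
** ixgbe_set_multi(), roughly like:
**
**	u8 *ptr = mta;
**	u32 vmdq;
**	for (int i = 0; i < mcnt; i++) {
**		u8 *addr = ixgbe_mc_array_itr(hw, &ptr, &vmdq);
**		... hash addr into the multicast table ...
**	}
*/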
1647
1648
1649/*********************************************************************
1650 * Timer routine
1651 *
1652 * This routine checks for link status, updates statistics,
1653 * and runs the watchdog timer.
1654 *
1655 **********************************************************************/
1656
1657static void
1658ixgbe_local_timer(void *arg)
1659{
1660 struct adapter *adapter = arg;
1661 struct ifnet *ifp = adapter->ifp;
1662
1663 mtx_assert(&adapter->core_mtx, MA_OWNED);
1664
1665 /* Check for pluggable optics */
1666 if (adapter->sfp_probe)
1667 if (!ixgbe_sfp_probe(adapter))
1668 goto out; /* Nothing to do */
1669
1670 ixgbe_update_link_status(adapter);
1671 ixgbe_update_stats_counters(adapter);
1672 if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1673 ixgbe_print_hw_stats(adapter);
1674 }
1675 /*
1513 * Each second we check the watchdog
1676 * Each tick we check the watchdog
1677 * to protect against hardware hangs.
1678 */
1679 ixgbe_watchdog(adapter);
1680
1681out:
1682 /* Trigger an RX interrupt on all queues */
1683 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, adapter->rx_mask);
1684
1685 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1686}
1687
1688static void
1689ixgbe_update_link_status(struct adapter *adapter)
1690{
1691 boolean_t link_up = FALSE;
1692 struct ifnet *ifp = adapter->ifp;
1693 struct tx_ring *txr = adapter->tx_rings;
1694 device_t dev = adapter->dev;
1695
1696 ixgbe_check_link(&adapter->hw, &adapter->link_speed, &link_up, 0);
1697
1698 if (link_up){
1699 if (adapter->link_active == FALSE) {
1700 if (bootverbose)
1701 				device_printf(dev,"Link is up %d Gbps %s\n",
1702 ((adapter->link_speed == 128)? 10:1),
1703 "Full Duplex");
1704 adapter->link_active = TRUE;
1705 if_link_state_change(ifp, LINK_STATE_UP);
1706 }
1707 } else { /* Link down */
1708 if (adapter->link_active == TRUE) {
1709 if (bootverbose)
1710 device_printf(dev,"Link is Down\n");
1711 if_link_state_change(ifp, LINK_STATE_DOWN);
1712 adapter->link_active = FALSE;
1713 for (int i = 0; i < adapter->num_tx_queues;
1714 i++, txr++)
1715 txr->watchdog_timer = FALSE;
1716 }
1717 }
1718
1719 return;
1720}
1721
1722
1723
1724/*********************************************************************
1725 *
1726 * This routine disables all traffic on the adapter by issuing a
1727 * global reset on the MAC and deallocates TX/RX buffers.
1728 *
1729 **********************************************************************/
1730
1731static void
1732ixgbe_stop(void *arg)
1733{
1734 struct ifnet *ifp;
1735 struct adapter *adapter = arg;
1736 ifp = adapter->ifp;
1737
1738 mtx_assert(&adapter->core_mtx, MA_OWNED);
1739
1740 INIT_DEBUGOUT("ixgbe_stop: begin\n");
1741 ixgbe_disable_intr(adapter);
1742
1743 /* Tell the stack that the interface is no longer active */
1744 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1745
1746 ixgbe_reset_hw(&adapter->hw);
1747 adapter->hw.adapter_stopped = FALSE;
1748 ixgbe_stop_adapter(&adapter->hw);
1749 callout_stop(&adapter->timer);
1750
1751 /* reprogram the RAR[0] in case user changed it. */
1752 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1753
1754 return;
1755}
1756
1757
1758/*********************************************************************
1759 *
1760 * Determine hardware revision.
1761 *
1762 **********************************************************************/
1763static void
1764ixgbe_identify_hardware(struct adapter *adapter)
1765{
1766 device_t dev = adapter->dev;
1767
1768 /* Save off the information about this board */
1769 adapter->hw.vendor_id = pci_get_vendor(dev);
1770 adapter->hw.device_id = pci_get_device(dev);
1771 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1772 adapter->hw.subsystem_vendor_id =
1773 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1774 adapter->hw.subsystem_device_id =
1775 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1776
1777 return;
1778}
1779
1780/*********************************************************************
1781 *
1782 * Setup the Legacy or MSI Interrupt handler
1783 *
1784 **********************************************************************/
1785static int
1786ixgbe_allocate_legacy(struct adapter *adapter)
1787{
1788 device_t dev = adapter->dev;
1789 struct tx_ring *txr = adapter->tx_rings;
1790 struct rx_ring *rxr = adapter->rx_rings;
1791 int error;
1792
1793 /* Legacy RID at 0 */
1794 if (adapter->msix == 0)
1795 adapter->rid[0] = 0;
1796
1797 /* We allocate a single interrupt resource */
1798 adapter->res[0] = bus_alloc_resource_any(dev,
1799 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
1800 if (adapter->res[0] == NULL) {
1801 device_printf(dev, "Unable to allocate bus resource: "
1802 "interrupt\n");
1803 return (ENXIO);
1804 }
1805
1806 /*
1807 * Try allocating a fast interrupt and the associated deferred
1808 * processing contexts.
1809 */
1810 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
1811 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
1812 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
1813 taskqueue_thread_enqueue, &txr->tq);
1814 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
1815 taskqueue_thread_enqueue, &rxr->tq);
1816 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
1817 device_get_nameunit(adapter->dev));
1818 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
1819 device_get_nameunit(adapter->dev));
1820 if ((error = bus_setup_intr(dev, adapter->res[0],
1821 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
1822 adapter, &adapter->tag[0])) != 0) {
1823 device_printf(dev, "Failed to register fast interrupt "
1824 "handler: %d\n", error);
1825 taskqueue_free(txr->tq);
1826 taskqueue_free(rxr->tq);
1827 txr->tq = NULL;
1828 rxr->tq = NULL;
1829 return (error);
1830 }
1831
1832 return (0);
1833}
1834
1835
1836/*********************************************************************
1837 *
1838 * Setup MSIX Interrupt resources and handlers
1839 *
1840 **********************************************************************/
1841static int
1842ixgbe_allocate_msix(struct adapter *adapter)
1843{
1844 device_t dev = adapter->dev;
1845 struct tx_ring *txr = adapter->tx_rings;
1846 struct rx_ring *rxr = adapter->rx_rings;
1847 int error, vector = 0;
1848
1849 	/* TX setup: this loop supports multiple TX queues, but other
1850 	   parts of the driver are not yet ready for that */
1851 for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
1852 adapter->res[vector] = bus_alloc_resource_any(dev,
1853 SYS_RES_IRQ, &adapter->rid[vector],
1854 RF_SHAREABLE | RF_ACTIVE);
1855 if (!adapter->res[vector]) {
1856 device_printf(dev,"Unable to allocate"
1857 " bus resource: tx interrupt [%d]\n", vector);
1858 return (ENXIO);
1859 }
1860 /* Set the handler function */
1861 error = bus_setup_intr(dev, adapter->res[vector],
1862 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1863 ixgbe_msix_tx, txr, &adapter->tag[vector]);
1864 if (error) {
1865 adapter->res[vector] = NULL;
1866 device_printf(dev, "Failed to register TX handler");
1867 return (error);
1868 }
1869 txr->msix = vector;
1870 txr->eims = IXGBE_IVAR_TX_QUEUE(vector);
1871 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
1872 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
1873 taskqueue_thread_enqueue, &txr->tq);
1874 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
1875 device_get_nameunit(adapter->dev));
1876 }
1877
1878 /* RX setup */
1879 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
1880 adapter->res[vector] = bus_alloc_resource_any(dev,
1881 SYS_RES_IRQ, &adapter->rid[vector],
1882 RF_SHAREABLE | RF_ACTIVE);
1883 if (!adapter->res[vector]) {
1884 device_printf(dev,"Unable to allocate"
1885 " bus resource: rx interrupt [%d],"
1886 			    " rid = %d\n", i, adapter->rid[vector]);
1887 return (ENXIO);
1888 }
1889 /* Set the handler function */
1890 error = bus_setup_intr(dev, adapter->res[vector],
1891 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_rx,
1892 rxr, &adapter->tag[vector]);
1893 if (error) {
1894 adapter->res[vector] = NULL;
1895 device_printf(dev, "Failed to register RX handler");
1896 return (error);
1897 }
1898 rxr->msix = vector;
1899 rxr->eims = IXGBE_IVAR_RX_QUEUE(vector);
1900 /* used in local timer */
1901 adapter->rx_mask |= rxr->eims;
1902 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
1903 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
1904 taskqueue_thread_enqueue, &rxr->tq);
1905 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
1906 device_get_nameunit(adapter->dev));
1907 }
1908
1909 /* Now for Link changes */
1910 adapter->res[vector] = bus_alloc_resource_any(dev,
1911 SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
1912 if (!adapter->res[vector]) {
1913 device_printf(dev,"Unable to allocate"
1914 " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);
1915 return (ENXIO);
1916 }
1917 /* Set the link handler function */
1918 error = bus_setup_intr(dev, adapter->res[vector],
1919 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_link,
1920 adapter, &adapter->tag[vector]);
1921 if (error) {
1922 adapter->res[vector] = NULL;
1923 device_printf(dev, "Failed to register LINK handler");
1924 return (error);
1925 }
1926 adapter->linkvec = vector;
1927
1928 return (0);
1929}
1930
1931
1932/*
1933 * Setup Either MSI/X or MSI
1934 */
1935static int
1936ixgbe_setup_msix(struct adapter *adapter)
1937{
1938 device_t dev = adapter->dev;
1939 int rid, want, queues, msgs;
1940
1941 /* Override by tuneable */
1942 if (ixgbe_enable_msix == 0)
1943 goto msi;
1944
1945 /* First try MSI/X */
1946 rid = PCIR_BAR(MSIX_82598_BAR);
1947 adapter->msix_mem = bus_alloc_resource_any(dev,
1948 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1949 if (!adapter->msix_mem) {
1950 rid += 4; /* 82599 maps in higher BAR */
1951 adapter->msix_mem = bus_alloc_resource_any(dev,
1952 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1953 }
1954 if (!adapter->msix_mem) {
1955 /* May not be enabled */
1956 device_printf(adapter->dev,
1957 		    "Unable to map MSIX table\n");
1958 goto msi;
1959 }
1960
1961 msgs = pci_msix_count(dev);
1962 if (msgs == 0) { /* system has msix disabled */
1963 bus_release_resource(dev, SYS_RES_MEMORY,
1964 rid, adapter->msix_mem);
1965 adapter->msix_mem = NULL;
1966 goto msi;
1967 }
1968
1969 /* Figure out a reasonable auto config value */
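	/*
	** One vector is reserved for the link interrupt; the rest are
	** split between TX and RX queues and capped at the number of
	** CPUs, hence the (msgs - 1) / 2 below.
	*/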
1970 queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
1971
1972 if (ixgbe_tx_queues == 0)
1973 ixgbe_tx_queues = queues;
1974 if (ixgbe_rx_queues == 0)
1975 ixgbe_rx_queues = queues;
1976 want = ixgbe_tx_queues + ixgbe_rx_queues + 1;
1977 if (msgs >= want)
1978 msgs = want;
1979 else {
1980 device_printf(adapter->dev,
1981 "MSIX Configuration Problem, "
1982 "%d vectors but %d queues wanted!\n",
1983 msgs, want);
1984 return (ENXIO);
1985 }
1986 if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
1987 device_printf(adapter->dev,
1988 "Using MSIX interrupts with %d vectors\n", msgs);
1989 adapter->num_tx_queues = ixgbe_tx_queues;
1990 adapter->num_rx_queues = ixgbe_rx_queues;
1991 return (msgs);
1992 }
1993msi:
1994 msgs = pci_msi_count(dev);
1995 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
1996 device_printf(adapter->dev,"Using MSI interrupt\n");
1997 return (msgs);
1998}
1999
2000static int
2001ixgbe_allocate_pci_resources(struct adapter *adapter)
2002{
2003 int rid;
2004 device_t dev = adapter->dev;
2005
2006 rid = PCIR_BAR(0);
2007 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2008 &rid, RF_ACTIVE);
2009
2010 if (!(adapter->pci_mem)) {
2011 device_printf(dev,"Unable to allocate bus resource: memory\n");
2012 return (ENXIO);
2013 }
2014
2015 adapter->osdep.mem_bus_space_tag =
2016 rman_get_bustag(adapter->pci_mem);
2017 adapter->osdep.mem_bus_space_handle =
2018 rman_get_bushandle(adapter->pci_mem);
2019 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
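	/*
	** Register access from here on goes through the osdep bus
	** space tag/handle captured above; hw_addr is pointed at the
	** handle itself rather than at a directly mapped window.
	*/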
2020
2021 /*
2022 * Init the resource arrays
2023 */
2024 for (int i = 0; i < IXGBE_MSGS; i++) {
2025 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2026 adapter->tag[i] = NULL;
2027 adapter->res[i] = NULL;
2028 }
2029
2030 /* Legacy defaults */
2031 adapter->num_tx_queues = 1;
2032 adapter->num_rx_queues = 1;
2033
2034 /* Now setup MSI or MSI/X */
2035 adapter->msix = ixgbe_setup_msix(adapter);
2036
2037 adapter->hw.back = &adapter->osdep;
2038 return (0);
2039}
2040
2041static void
2042ixgbe_free_pci_resources(struct adapter * adapter)
2043{
2044 device_t dev = adapter->dev;
2045 int rid;
2046
2047 /*
2048 * Legacy has this set to 0, but we need
2049 * to run this once, so reset it.
2050 */
2051 if (adapter->msix == 0)
2052 adapter->msix = 1;
2053
2054 rid = PCIR_BAR(MSIX_82598_BAR);
2055
2056 /*
2057 * First release all the interrupt resources:
2058 * notice that since these are just kept
2059 * in an array we can do the same logic
2060 	 * whether it's MSIX or just legacy.
2061 */
2062 for (int i = 0; i < adapter->msix; i++) {
2063 if (adapter->tag[i] != NULL) {
2064 bus_teardown_intr(dev, adapter->res[i],
2065 adapter->tag[i]);
2066 adapter->tag[i] = NULL;
2067 }
2068 if (adapter->res[i] != NULL) {
2069 bus_release_resource(dev, SYS_RES_IRQ,
2070 adapter->rid[i], adapter->res[i]);
2071 }
2072 }
2073
2074 if (adapter->msix)
2075 pci_release_msi(dev);
2076
2077 if (adapter->msix_mem != NULL)
2078 bus_release_resource(dev, SYS_RES_MEMORY,
2079 rid, adapter->msix_mem);
2080
2081 if (adapter->pci_mem != NULL)
2082 bus_release_resource(dev, SYS_RES_MEMORY,
2083 PCIR_BAR(0), adapter->pci_mem);
2084
2085 return;
2086}
2087
2088/*********************************************************************
2089 *
2090 * Initialize the hardware to a configuration as specified by the
2091 * adapter structure. The controller is reset, the EEPROM is
2092 * verified, the MAC address is set, then the shared initialization
2093 * routines are called.
2094 *
2095 **********************************************************************/
2096static int
2097ixgbe_hardware_init(struct adapter *adapter)
2098{
2099 device_t dev = adapter->dev;
2100 u16 csum;
2101
2102 csum = 0;
2103 /* Issue a global reset */
2104 adapter->hw.adapter_stopped = FALSE;
2105 ixgbe_stop_adapter(&adapter->hw);
2106
2107 /* Make sure we have a good EEPROM before we read from it */
2108 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
2109 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
2110 return (EIO);
2111 }
2112
2113 	/* Set the default Hardware Flow Control settings */
2114 adapter->hw.fc.requested_mode = ixgbe_fc_full;
2115 adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
2116 adapter->hw.fc.low_water = IXGBE_FC_LO;
2117 adapter->hw.fc.high_water = IXGBE_FC_HI;
2118 adapter->hw.fc.send_xon = TRUE;
2119
2120 if (ixgbe_init_hw(&adapter->hw)) {
2121 device_printf(dev,"Hardware Initialization Failed");
2122 return (EIO);
2123 }
2124
2125 return (0);
2126}
2127
2128/*********************************************************************
2129 *
2130 * Setup networking device structure and register an interface.
2131 *
2132 **********************************************************************/
2133static void
2134ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2135{
2136 struct ifnet *ifp;
2137 struct ixgbe_hw *hw = &adapter->hw;
2138 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2139
2140 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2141 if (ifp == NULL)
2142 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
2143 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2144 ifp->if_mtu = ETHERMTU;
2145 ifp->if_baudrate = 1000000000;
2146 ifp->if_init = ixgbe_init;
2147 ifp->if_softc = adapter;
2148 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2149 ifp->if_ioctl = ixgbe_ioctl;
2150 ifp->if_start = ixgbe_start;
2151 ifp->if_timer = 0;
2152 ifp->if_watchdog = NULL;
2153 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
2154
2155 ether_ifattach(ifp, adapter->hw.mac.addr);
2156
2157 adapter->max_frame_size =
2158 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2159
2160 /*
2161 * Tell the upper layer(s) we support long frames.
2162 */
2163 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2164
2165 ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
2166 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2167 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2168
2169 ifp->if_capenable = ifp->if_capabilities;
2170
2171 if (hw->device_id == IXGBE_DEV_ID_82598AT)
2172 ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
2173 IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
2174 else
2175 ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
2176 TRUE, FALSE);
2177
2178 /*
2179 * Specify the media types supported by this adapter and register
2180 * callbacks to update media and link information
2181 */
2182 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2183 ixgbe_media_status);
2184 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
2185 IFM_FDX, 0, NULL);
2186 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2187 ifmedia_add(&adapter->media,
2188 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2189 ifmedia_add(&adapter->media,
2190 IFM_ETHER | IFM_1000_T, 0, NULL);
2191 }
2192 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2193 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2194
2195 return;
2196}
2197
2198/********************************************************************
2199 * Manage DMA'able memory.
2200 *******************************************************************/
2201static void
2202ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2203{
2204 if (error)
2205 return;
2206 *(bus_addr_t *) arg = segs->ds_addr;
2207 return;
2208}
2209
2210static int
2211ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2212 struct ixgbe_dma_alloc *dma, int mapflags)
2213{
2214 device_t dev = adapter->dev;
2215 int r;
2216
2217 r = bus_dma_tag_create(NULL, /* parent */
2218 PAGE_SIZE, 0, /* alignment, bounds */
2219 BUS_SPACE_MAXADDR, /* lowaddr */
2220 BUS_SPACE_MAXADDR, /* highaddr */
2221 NULL, NULL, /* filter, filterarg */
2222 size, /* maxsize */
2223 1, /* nsegments */
2224 size, /* maxsegsize */
2225 BUS_DMA_ALLOCNOW, /* flags */
2226 NULL, /* lockfunc */
2227 NULL, /* lockfuncarg */
2228 &dma->dma_tag);
2229 if (r != 0) {
2230 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2231 "error %u\n", r);
2232 goto fail_0;
2233 }
2234 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2235 BUS_DMA_NOWAIT, &dma->dma_map);
2236 if (r != 0) {
2237 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2238 "error %u\n", r);
2239 goto fail_1;
2240 }
2241 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2242 size,
2243 ixgbe_dmamap_cb,
2244 &dma->dma_paddr,
2245 mapflags | BUS_DMA_NOWAIT);
2246 if (r != 0) {
2247 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2248 "error %u\n", r);
2249 goto fail_2;
2250 }
2251 dma->dma_size = size;
2252 return (0);
2253fail_2:
2254 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2255fail_1:
2256 bus_dma_tag_destroy(dma->dma_tag);
2257fail_0:
2258 dma->dma_map = NULL;
2259 dma->dma_tag = NULL;
2260 return (r);
2261}
2262
2263static void
2264ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2265{
2266 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2267 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2268 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2269 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2270 bus_dma_tag_destroy(dma->dma_tag);
2271}
2272
2273
2274/*********************************************************************
2275 *
2276 * Allocate memory for the transmit and receive rings, and then
2277 * the descriptors associated with each, called only once at attach.
2278 *
2279 **********************************************************************/
2280static int
2281ixgbe_allocate_queues(struct adapter *adapter)
2282{
2283 device_t dev = adapter->dev;
2284 struct tx_ring *txr;
2285 struct rx_ring *rxr;
2286 int rsize, tsize, error = IXGBE_SUCCESS;
2287 int txconf = 0, rxconf = 0;
2288
2289 /* First allocate the TX ring struct memory */
2290 if (!(adapter->tx_rings =
2291 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2292 adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2293 device_printf(dev, "Unable to allocate TX ring memory\n");
2294 error = ENOMEM;
2295 goto fail;
2296 }
2297 txr = adapter->tx_rings;
2298
2299 /* Next allocate the RX */
2300 if (!(adapter->rx_rings =
2301 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2302 adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2303 device_printf(dev, "Unable to allocate RX ring memory\n");
2304 error = ENOMEM;
2305 goto rx_fail;
2306 }
2307 rxr = adapter->rx_rings;
2308
2309 /* For the ring itself */
2310 tsize = roundup2(adapter->num_tx_desc *
2311 sizeof(union ixgbe_adv_tx_desc), 4096);
2312
2313 /*
2314 * Now set up the TX queues, txconf is needed to handle the
2315 * possibility that things fail midcourse and we need to
2316 * undo memory gracefully
2317 */
2318 for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
2319 /* Set up some basics */
2320 txr = &adapter->tx_rings[i];
2321 txr->adapter = adapter;
2322 txr->me = i;
2323
2324 /* Initialize the TX side lock */
2325 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2326 device_get_nameunit(dev), txr->me);
2327 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2328
2329 if (ixgbe_dma_malloc(adapter, tsize,
2330 &txr->txdma, BUS_DMA_NOWAIT)) {
2331 device_printf(dev,
2332 "Unable to allocate TX Descriptor memory\n");
2333 error = ENOMEM;
2334 goto err_tx_desc;
2335 }
2336 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2337 bzero((void *)txr->tx_base, tsize);
2338
2339 /* Now allocate transmit buffers for the ring */
2340 if (ixgbe_allocate_transmit_buffers(txr)) {
2341 device_printf(dev,
2342 "Critical Failure setting up transmit buffers\n");
2343 error = ENOMEM;
2344 goto err_tx_desc;
2345 }
2346
2347 }
2348
2349 /*
2350 * Next the RX queues...
2351 */
2352 rsize = roundup2(adapter->num_rx_desc *
2353 sizeof(union ixgbe_adv_rx_desc), 4096);
2354 for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
2355 rxr = &adapter->rx_rings[i];
2356 /* Set up some basics */
2357 rxr->adapter = adapter;
2358 rxr->me = i;
2359
2360 /* Initialize the RX side lock */
2361 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2362 device_get_nameunit(dev), rxr->me);
2363 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2364
2365 if (ixgbe_dma_malloc(adapter, rsize,
2366 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2367 device_printf(dev,
2368 "Unable to allocate RxDescriptor memory\n");
2369 error = ENOMEM;
2370 goto err_rx_desc;
2371 }
2372 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2373 bzero((void *)rxr->rx_base, rsize);
2374
2375 /* Allocate receive buffers for the ring*/
2376 if (ixgbe_allocate_receive_buffers(rxr)) {
2377 device_printf(dev,
2378 "Critical Failure setting up receive buffers\n");
2379 error = ENOMEM;
2380 goto err_rx_desc;
2381 }
2382 }
2383
2384 return (0);
2385
2386err_rx_desc:
2387 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2388 ixgbe_dma_free(adapter, &rxr->rxdma);
2389err_tx_desc:
2390 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2391 ixgbe_dma_free(adapter, &txr->txdma);
2392 free(adapter->rx_rings, M_DEVBUF);
2393rx_fail:
2394 free(adapter->tx_rings, M_DEVBUF);
2395fail:
2396 return (error);
2397}
2398
2399/*********************************************************************
2400 *
2401 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2402 * the information needed to transmit a packet on the wire. This is
2403 * called only once at attach, setup is done every reset.
2404 *
2405 **********************************************************************/
2406static int
2407ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2408{
2409 struct adapter *adapter = txr->adapter;
2410 device_t dev = adapter->dev;
2411 struct ixgbe_tx_buf *txbuf;
2412 int error, i;
2413
2414 /*
2415 * Setup DMA descriptor areas.
2416 */
2417 if ((error = bus_dma_tag_create(NULL, /* parent */
2418 PAGE_SIZE, 0, /* alignment, bounds */
2419 BUS_SPACE_MAXADDR, /* lowaddr */
2420 BUS_SPACE_MAXADDR, /* highaddr */
2421 NULL, NULL, /* filter, filterarg */
2422 IXGBE_TSO_SIZE, /* maxsize */
2423 IXGBE_MAX_SCATTER, /* nsegments */
2424 PAGE_SIZE, /* maxsegsize */
2425 0, /* flags */
2426 NULL, /* lockfunc */
2427 NULL, /* lockfuncarg */
2428 &txr->txtag))) {
2429 device_printf(dev,"Unable to allocate TX DMA tag\n");
2430 goto fail;
2431 }
2432
2433 if (!(txr->tx_buffers =
2434 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2435 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2436 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2437 error = ENOMEM;
2438 goto fail;
2439 }
2440
2441 /* Create the descriptor buffer dma maps */
2442 txbuf = txr->tx_buffers;
2443 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2444 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2445 if (error != 0) {
2446 device_printf(dev, "Unable to create TX DMA map\n");
2447 goto fail;
2448 }
2449 }
2450
2451 return 0;
2452fail:
2453 /* We free all, it handles case where we are in the middle */
2454 ixgbe_free_transmit_structures(adapter);
2455 return (error);
2456}
2457
2458/*********************************************************************
2459 *
2460 * Initialize a transmit ring.
2461 *
2462 **********************************************************************/
2463static void
2464ixgbe_setup_transmit_ring(struct tx_ring *txr)
2465{
2466 struct adapter *adapter = txr->adapter;
2467 struct ixgbe_tx_buf *txbuf;
2468 int i;
2469
2470 /* Clear the old ring contents */
2471 bzero((void *)txr->tx_base,
2472 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2473 /* Reset indices */
2474 txr->next_avail_tx_desc = 0;
2475 txr->next_tx_to_clean = 0;
2476
2477 /* Free any existing tx buffers. */
2478 txbuf = txr->tx_buffers;
2479 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2480 if (txbuf->m_head != NULL) {
2481 bus_dmamap_sync(txr->txtag, txbuf->map,
2482 BUS_DMASYNC_POSTWRITE);
2483 bus_dmamap_unload(txr->txtag, txbuf->map);
2484 m_freem(txbuf->m_head);
2485 txbuf->m_head = NULL;
2486 }
2487 }
2488
2489 /* Set number of descriptors available */
2490 txr->tx_avail = adapter->num_tx_desc;
2491
2492 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2493 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2494
2495}
2496
2497/*********************************************************************
2498 *
2499 * Initialize all transmit rings.
2500 *
2501 **********************************************************************/
2502static int
2503ixgbe_setup_transmit_structures(struct adapter *adapter)
2504{
2505 struct tx_ring *txr = adapter->tx_rings;
2506
2507 for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
2508 ixgbe_setup_transmit_ring(txr);
2509
2510 return (0);
2511}
2512
2513/*********************************************************************
2514 *
2515 * Enable transmit unit.
2516 *
2517 **********************************************************************/
2518static void
2519ixgbe_initialize_transmit_units(struct adapter *adapter)
2520{
2521 struct tx_ring *txr = adapter->tx_rings;
2522 struct ixgbe_hw *hw = &adapter->hw;
2523
2524 /* Setup the Base and Length of the Tx Descriptor Ring */
2525
2526 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2527 u64 txhwb = 0, tdba = txr->txdma.dma_paddr;
2528 u32 txctrl;
2529
2530 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2531 (tdba & 0x00000000ffffffffULL));
2532 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2533 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2534 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2535
2536 /* Setup for Head WriteBack */
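		/*
		** With head write-back the hardware DMAs the index of the
		** last completed descriptor into txr->tx_hwb, letting
		** ixgbe_txeof() poll host memory instead of reading TDH.
		*/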
2537 txhwb = (u64)vtophys(&txr->tx_hwb);
2538 txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2539 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
2540 (txhwb & 0x00000000ffffffffULL));
2541 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
2542 (txhwb >> 32));
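		/* Clearing TX_WB_RO_EN below disables relaxed ordering
		   for that head write-back DMA */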
2543 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2544 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2545 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2546
2547 /* Setup the HW Tx Head and Tail descriptor pointers */
2548 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2549 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2550
2551 /* Setup Transmit Descriptor Cmd Settings */
2552 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2553
2554 txr->watchdog_timer = 0;
2555 }
2556
2557 return;
2558}
2559
2560/*********************************************************************
2561 *
2562 * Free all transmit rings.
2563 *
2564 **********************************************************************/
2565static void
2566ixgbe_free_transmit_structures(struct adapter *adapter)
2567{
2568 struct tx_ring *txr = adapter->tx_rings;
2569
2570 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2571 IXGBE_TX_LOCK(txr);
2572 ixgbe_free_transmit_buffers(txr);
2573 ixgbe_dma_free(adapter, &txr->txdma);
2574 IXGBE_TX_UNLOCK(txr);
2575 IXGBE_TX_LOCK_DESTROY(txr);
2576 }
2577 free(adapter->tx_rings, M_DEVBUF);
2578}
2579
2580/*********************************************************************
2581 *
2582 * Free transmit ring related data structures.
2583 *
2584 **********************************************************************/
2585static void
2586ixgbe_free_transmit_buffers(struct tx_ring *txr)
2587{
2588 struct adapter *adapter = txr->adapter;
2589 struct ixgbe_tx_buf *tx_buffer;
2590 int i;
2591
2592 INIT_DEBUGOUT("free_transmit_ring: begin");
2593
2594 if (txr->tx_buffers == NULL)
2595 return;
2596
2597 tx_buffer = txr->tx_buffers;
2598 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2599 if (tx_buffer->m_head != NULL) {
2600 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2601 BUS_DMASYNC_POSTWRITE);
2602 bus_dmamap_unload(txr->txtag,
2603 tx_buffer->map);
2604 m_freem(tx_buffer->m_head);
2605 tx_buffer->m_head = NULL;
2606 if (tx_buffer->map != NULL) {
2607 bus_dmamap_destroy(txr->txtag,
2608 tx_buffer->map);
2609 tx_buffer->map = NULL;
2610 }
2611 } else if (tx_buffer->map != NULL) {
2612 bus_dmamap_unload(txr->txtag,
2613 tx_buffer->map);
2614 bus_dmamap_destroy(txr->txtag,
2615 tx_buffer->map);
2616 tx_buffer->map = NULL;
2617 }
2618 }
2619
2620 if (txr->tx_buffers != NULL) {
2621 free(txr->tx_buffers, M_DEVBUF);
2622 txr->tx_buffers = NULL;
2623 }
2624 if (txr->txtag != NULL) {
2625 bus_dma_tag_destroy(txr->txtag);
2626 txr->txtag = NULL;
2627 }
2628 return;
2629}
2630
2631/*********************************************************************
2632 *
2633 * Advanced Context Descriptor setup for VLAN or CSUM
2634 *
2635 **********************************************************************/
2636
2637static boolean_t
2638ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2639{
2640 struct adapter *adapter = txr->adapter;
2641 struct ixgbe_adv_tx_context_desc *TXD;
2642 struct ixgbe_tx_buf *tx_buffer;
2643 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2644 struct ether_vlan_header *eh;
2645 struct ip *ip;
2646 struct ip6_hdr *ip6;
2647 int ehdrlen, ip_hlen = 0;
2648 u16 etype;
2649 u8 ipproto = 0;
2650 bool offload = TRUE;
2651 int ctxd = txr->next_avail_tx_desc;
2652#if __FreeBSD_version < 700000
2653 struct m_tag *mtag;
2654#else
2655 u16 vtag = 0;
2656#endif
2657
2658
2659 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2660 offload = FALSE;
2661
2662 tx_buffer = &txr->tx_buffers[ctxd];
2663 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2664
2665 /*
2666 ** In advanced descriptors the vlan tag must
2667 ** be placed into the descriptor itself.
2668 */
2669#if __FreeBSD_version < 700000
2670 mtag = VLAN_OUTPUT_TAG(ifp, mp);
2671 if (mtag != NULL) {
2672 vlan_macip_lens |=
2673 htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT;
2674 } else if (offload == FALSE)
2675 return FALSE; /* No need for CTX */
2676#else
2677 if (mp->m_flags & M_VLANTAG) {
2678 vtag = htole16(mp->m_pkthdr.ether_vtag);
2679 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2680 } else if (offload == FALSE)
2681 return FALSE;
2682#endif
2683 /*
2684 * Determine where frame payload starts.
2685 * Jump over vlan headers if already present,
2686 * helpful for QinQ too.
2687 */
2688 eh = mtod(mp, struct ether_vlan_header *);
2689 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2690 etype = ntohs(eh->evl_proto);
2691 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2692 } else {
2693 etype = ntohs(eh->evl_encap_proto);
2694 ehdrlen = ETHER_HDR_LEN;
2695 }
2696
2697 /* Set the ether header length */
2698 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2699
2700 switch (etype) {
2701 case ETHERTYPE_IP:
2702 ip = (struct ip *)(mp->m_data + ehdrlen);
2703 ip_hlen = ip->ip_hl << 2;
2704 if (mp->m_len < ehdrlen + ip_hlen)
2705 return FALSE; /* failure */
2706 ipproto = ip->ip_p;
2707 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2708 break;
2709 case ETHERTYPE_IPV6:
2710 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2711 ip_hlen = sizeof(struct ip6_hdr);
2712 if (mp->m_len < ehdrlen + ip_hlen)
2713 return FALSE; /* failure */
2714 ipproto = ip6->ip6_nxt;
2715 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2716 break;
2717 default:
2718 offload = FALSE;
2719 break;
2720 }
2721
2722 vlan_macip_lens |= ip_hlen;
2723 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2724
2725 switch (ipproto) {
2726 case IPPROTO_TCP:
2727 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2728 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2729 break;
2730 case IPPROTO_UDP:
2731 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2732 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2733 break;
2734 default:
2735 offload = FALSE;
2736 break;
2737 }
2738
2739 /* Now copy bits into descriptor */
2740 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2741 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2742 TXD->seqnum_seed = htole32(0);
2743 TXD->mss_l4len_idx = htole32(0);
2744
2745 tx_buffer->m_head = NULL;
2746
2747 /* We've consumed the first desc, adjust counters */
2748 if (++ctxd == adapter->num_tx_desc)
2749 ctxd = 0;
2750 txr->next_avail_tx_desc = ctxd;
2751 --txr->tx_avail;
2752
2753 return (offload);
2754}
2755
2756#if __FreeBSD_version >= 700000
2757/**********************************************************************
2758 *
2759 * Setup work for hardware segmentation offload (TSO) on
2760 * adapters using advanced tx descriptors
2761 *
2762 **********************************************************************/
2763static boolean_t
2764ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2765{
2766 struct adapter *adapter = txr->adapter;
2767 struct ixgbe_adv_tx_context_desc *TXD;
2768 struct ixgbe_tx_buf *tx_buffer;
2769 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2770 u32 mss_l4len_idx = 0;
2771 u16 vtag = 0;
2772 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2773 struct ether_vlan_header *eh;
2774 struct ip *ip;
2775 struct tcphdr *th;
2776
2777 if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
2778 (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
2779 return FALSE;
2780
2781 /*
2782 * Determine where frame payload starts.
2783 * Jump over vlan headers if already present
2784 */
2785 eh = mtod(mp, struct ether_vlan_header *);
2786 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2787 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2788 else
2789 ehdrlen = ETHER_HDR_LEN;
2790
2791 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2792 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2793 return FALSE;
2794
2795 ctxd = txr->next_avail_tx_desc;
2796 tx_buffer = &txr->tx_buffers[ctxd];
2797 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2798
2799 ip = (struct ip *)(mp->m_data + ehdrlen);
2800 if (ip->ip_p != IPPROTO_TCP)
2801 return FALSE; /* 0 */
2802 ip->ip_len = 0;
2803 ip->ip_sum = 0;
2804 ip_hlen = ip->ip_hl << 2;
2805 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2806 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2807 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
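	/*
	** For TSO the IP length and checksum are zeroed and the TCP
	** checksum is seeded with the pseudo-header sum (addresses and
	** protocol, no length); the hardware completes the checksum
	** for each segment it generates.
	*/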
2808 tcp_hlen = th->th_off << 2;
2809 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2810 /* This is used in the transmit desc in encap */
2811 *paylen = mp->m_pkthdr.len - hdrlen;
2812
2813 /* VLAN MACLEN IPLEN */
2814 if (mp->m_flags & M_VLANTAG) {
2815 vtag = htole16(mp->m_pkthdr.ether_vtag);
2816 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2817 }
2818
2819 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2820 vlan_macip_lens |= ip_hlen;
2821 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2822
2823 /* ADV DTYPE TUCMD */
2824 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2825 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2826 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2827 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2828
2829
2830 /* MSS L4LEN IDX */
2831 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2832 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2833 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2834
2835 TXD->seqnum_seed = htole32(0);
2836 tx_buffer->m_head = NULL;
2837
2838 if (++ctxd == adapter->num_tx_desc)
2839 ctxd = 0;
2840
2841 txr->tx_avail--;
2842 txr->next_avail_tx_desc = ctxd;
2843 return TRUE;
2844}
2845
2846#else /* For 6.2 RELEASE */
2847/* This makes it easy to keep the code common */
2848static boolean_t
2849ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2850{
2851 return (FALSE);
2852}
2853#endif
2854
2855/**********************************************************************
2856 *
2857 * Examine each tx_buffer in the used queue. If the hardware is done
2858 * processing the packet then free associated resources. The
2859 * tx_buffer is put back on the free queue.
2860 *
2861 **********************************************************************/
2862static boolean_t
2863ixgbe_txeof(struct tx_ring *txr)
2864{
2865 struct adapter * adapter = txr->adapter;
2866 struct ifnet *ifp = adapter->ifp;
2867 u32 first, last, done, num_avail;
2868 u32 cleaned = 0;
2869 struct ixgbe_tx_buf *tx_buffer;
2870 struct ixgbe_legacy_tx_desc *tx_desc;
2871
2872 	mtx_assert(&txr->tx_mtx, MA_OWNED);
2873
2874 if (txr->tx_avail == adapter->num_tx_desc)
2875 return FALSE;
2876
2877 num_avail = txr->tx_avail;
2878 first = txr->next_tx_to_clean;
2879
2880 tx_buffer = &txr->tx_buffers[first];
2881 /* For cleanup we just use legacy struct */
2882 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2883
2884 	/* Get the head write-back (HWB) index posted by the hardware */
2885 rmb();
2886 done = txr->tx_hwb;
2887
2888 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2889 BUS_DMASYNC_POSTREAD);
2890
2891 while (TRUE) {
2892 /* We clean the range til last head write back */
2893 while (first != done) {
2894 tx_desc->upper.data = 0;
2895 tx_desc->lower.data = 0;
2896 tx_desc->buffer_addr = 0;
2897 num_avail++; cleaned++;
2898
2899 if (tx_buffer->m_head) {
2900 ifp->if_opackets++;
2901 bus_dmamap_sync(txr->txtag,
2902 tx_buffer->map,
2903 BUS_DMASYNC_POSTWRITE);
2904 bus_dmamap_unload(txr->txtag,
2905 tx_buffer->map);
2906 m_freem(tx_buffer->m_head);
2907 tx_buffer->m_head = NULL;
2908 tx_buffer->map = NULL;
2909 }
2910
2911 if (++first == adapter->num_tx_desc)
2912 first = 0;
2913
2914 tx_buffer = &txr->tx_buffers[first];
2915 tx_desc =
2916 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2917 }
2918 /* See if there is more work now */
2919 last = done;
2920 rmb();
2921 done = txr->tx_hwb;
2922 if (last == done)
2923 break;
2924 }
2925 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2926 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2927
2928 txr->next_tx_to_clean = first;
2929
2930 /*
2931 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2932 * it is OK to send packets. If there are no pending descriptors,
2933 * clear the timeout. Otherwise, if some descriptors have been freed,
2934 * restart the timeout.
2935 */
2936 if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2937 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2938 /* If all are clean turn off the timer */
2939 if (num_avail == adapter->num_tx_desc) {
2940 txr->watchdog_timer = 0;
2941 txr->tx_avail = num_avail;
2942 return FALSE;
2943 }
2944 }
2945
2946 /* Some were cleaned, so reset timer */
2947 if (cleaned)
2948 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
2949 txr->tx_avail = num_avail;
2950 return TRUE;
2951}
2952
2953/*********************************************************************
2954 *
2955 * Get a buffer from system mbuf buffer pool.
2956 *
2957 **********************************************************************/
2958static int
2959ixgbe_get_buf(struct rx_ring *rxr, int i, u8 clean)
2960{
2961 struct adapter *adapter = rxr->adapter;
2962 bus_dma_segment_t seg[2];
2963 struct ixgbe_rx_buf *rxbuf;
2964 struct mbuf *mh, *mp;
2965 bus_dmamap_t map;
2966 int nsegs, error;
2967 int merr = 0;
2968
2969
2970 rxbuf = &rxr->rx_buffers[i];
2971
2972 /* First get our header and payload mbuf */
2973 if (clean & IXGBE_CLEAN_HDR) {
2974 mh = m_gethdr(M_DONTWAIT, MT_DATA);
2975 if (mh == NULL)
2976 goto remap;
2977 } else /* reuse */
2978 mh = rxr->rx_buffers[i].m_head;
2979
2980 mh->m_len = MHLEN;
2981 mh->m_flags |= M_PKTHDR;
2982
2983 if (clean & IXGBE_CLEAN_PKT) {
2984 mp = m_getjcl(M_DONTWAIT, MT_DATA,
2985 M_PKTHDR, adapter->rx_mbuf_sz);
2986 if (mp == NULL)
2987 goto remap;
2988 mp->m_len = adapter->rx_mbuf_sz;
2989 mp->m_flags &= ~M_PKTHDR;
2990 } else { /* reusing */
2991 mp = rxr->rx_buffers[i].m_pack;
2992 mp->m_len = adapter->rx_mbuf_sz;
2993 mp->m_flags &= ~M_PKTHDR;
2994 }
2995 /*
2996 ** Need to create a chain for the following
2997 ** dmamap call at this point.
2998 */
2999 mh->m_next = mp;
3000 mh->m_pkthdr.len = mh->m_len + mp->m_len;
3001
3002 /* Get the memory mapping */
3003 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3004 rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3005 if (error != 0) {
3006 printf("GET BUF: dmamap load failure - %d\n", error);
3007 m_free(mh);
3008 return (error);
3009 }
3010
3011 /* Unload old mapping and update buffer struct */
3012 if (rxbuf->m_head != NULL)
3013 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3014 map = rxbuf->map;
3015 rxbuf->map = rxr->spare_map;
3016 rxr->spare_map = map;
3017 rxbuf->m_head = mh;
3018 rxbuf->m_pack = mp;
3019 bus_dmamap_sync(rxr->rxtag,
3020 rxbuf->map, BUS_DMASYNC_PREREAD);
3021
3022 /* Update descriptor */
3023 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3024 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3025
3026 return (0);
3027
3028 /*
3029 ** If we get here, we have an mbuf resource
3030 ** issue, so we discard the incoming packet
3031 ** and attempt to reuse existing mbufs next
3032 ** pass thru the ring, but to do so we must
3033 ** fix up the descriptor which had the address
3034 ** clobbered with writeback info.
3035 */
3036remap:
3037 adapter->mbuf_header_failed++;
3038 merr = ENOBUFS;
3039 /* Is there a reusable buffer? */
3040 mh = rxr->rx_buffers[i].m_head;
3041 if (mh == NULL) /* Nope, init error */
3042 return (merr);
3043 mp = rxr->rx_buffers[i].m_pack;
3044 if (mp == NULL) /* Nope, init error */
3045 return (merr);
3046 /* Get our old mapping */
3047 rxbuf = &rxr->rx_buffers[i];
3048 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3049 rxbuf->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3050 if (error != 0) {
3051 /* We really have a problem */
3052 m_free(mh);
3053 return (error);
3054 }
3055 /* Now fix the descriptor as needed */
3056 rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3057 rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3058
3059 return (merr);
3060}
3061
3062
3063/*********************************************************************
3064 *
3065 * Allocate memory for rx_buffer structures. Since we use one
3066 * rx_buffer per received packet, the maximum number of rx_buffer's
3067 * that we'll need is equal to the number of receive descriptors
3068 * that we've allocated.
3069 *
3070 **********************************************************************/
3071static int
3072ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3073{
3074 struct adapter *adapter = rxr->adapter;
3075 device_t dev = adapter->dev;
3076 struct ixgbe_rx_buf *rxbuf;
3077 int i, bsize, error;
3078
3079 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3080 if (!(rxr->rx_buffers =
3081 (struct ixgbe_rx_buf *) malloc(bsize,
3082 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3083 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3084 error = ENOMEM;
3085 goto fail;
3086 }
3087
3088 /*
3089 	** The tag is made to accommodate the largest buffer size
3090 	** with packet split (hence the two segments), even though
3091 	** it may not always be used this way.
3092 */
3093 if ((error = bus_dma_tag_create(NULL, /* parent */
3094 PAGE_SIZE, 0, /* alignment, bounds */
3095 BUS_SPACE_MAXADDR, /* lowaddr */
3096 BUS_SPACE_MAXADDR, /* highaddr */
3097 NULL, NULL, /* filter, filterarg */
3098 MJUM16BYTES, /* maxsize */
3099 2, /* nsegments */
3100 MJUMPAGESIZE, /* maxsegsize */
3101 0, /* flags */
3102 NULL, /* lockfunc */
3103 NULL, /* lockfuncarg */
3104 &rxr->rxtag))) {
3105 device_printf(dev, "Unable to create RX DMA tag\n");
3106 goto fail;
3107 }
3108
3109 /* Create the spare map (used by getbuf) */
3110 error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3111 &rxr->spare_map);
3112 if (error) {
3113 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3114 __func__, error);
3115 goto fail;
3116 }
3117
3118 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3119 rxbuf = &rxr->rx_buffers[i];
3120 error = bus_dmamap_create(rxr->rxtag,
3121 BUS_DMA_NOWAIT, &rxbuf->map);
3122 if (error) {
3123 device_printf(dev, "Unable to create RX DMA map\n");
3124 goto fail;
3125 }
3126 }
3127
3128 return (0);
3129
3130fail:
3131 /* Frees all, but can handle partial completion */
3132 ixgbe_free_receive_structures(adapter);
3133 return (error);
3134}
3135
3136/*********************************************************************
3137 *
3138 * Initialize a receive ring and its buffers.
3139 *
3140 **********************************************************************/
3141static int
3142ixgbe_setup_receive_ring(struct rx_ring *rxr)
3143{
3144 struct adapter *adapter;
3145 device_t dev;
3146 struct ixgbe_rx_buf *rxbuf;
3147 struct lro_ctrl *lro = &rxr->lro;
3148 int j, rsize;
3149
3150 adapter = rxr->adapter;
3151 dev = adapter->dev;
3152
3153 /* Clear the ring contents */
3154 rsize = roundup2(adapter->num_rx_desc *
3155 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3156 bzero((void *)rxr->rx_base, rsize);
3157
3158 /*
3159 ** Free current RX buffer structs and their mbufs
3160 */
3161 for (int i = 0; i < adapter->num_rx_desc; i++) {
3162 rxbuf = &rxr->rx_buffers[i];
3163 if (rxbuf->m_head != NULL) {
3164 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3165 BUS_DMASYNC_POSTREAD);
3166 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3167 if (rxbuf->m_head) {
3168 rxbuf->m_head->m_next = rxbuf->m_pack;
3169 m_freem(rxbuf->m_head);
3170 }
3171 rxbuf->m_head = NULL;
3172 rxbuf->m_pack = NULL;
3173 }
3174 }
3175
3176 /* Now refresh the mbufs */
3177 for (j = 0; j < adapter->num_rx_desc; j++) {
3178 if (ixgbe_get_buf(rxr, j, IXGBE_CLEAN_ALL) == ENOBUFS) {
3179 rxr->rx_buffers[j].m_head = NULL;
3180 rxr->rx_buffers[j].m_pack = NULL;
3181 rxr->rx_base[j].read.hdr_addr = 0;
3182 rxr->rx_base[j].read.pkt_addr = 0;
3183 goto fail;
3184 }
3185 }
3186
3187 /* Setup our descriptor indices */
3188 rxr->next_to_check = 0;
3189 rxr->last_cleaned = 0;
3190
3191 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3192 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3193
3194 /* Now set up the LRO interface */
3195 if (ixgbe_enable_lro) {
3196 int err = tcp_lro_init(lro);
3197 if (err) {
3198 INIT_DEBUGOUT("LRO Initialization failed!\n");
3199 goto fail;
3200 }
3201 INIT_DEBUGOUT("RX LRO Initialized\n");
3202 lro->ifp = adapter->ifp;
3203 }
3204
3205 return (0);
3206
3207fail:
3208 /*
3209 * We need to clean up any buffers allocated
3210 * so far, 'j' is the failing index.
3211 */
3212 for (int i = 0; i < j; i++) {
3213 rxbuf = &rxr->rx_buffers[i];
3214 if (rxbuf->m_head != NULL) {
3215 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3216 BUS_DMASYNC_POSTREAD);
3217 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3218 m_freem(rxbuf->m_head);
3219 rxbuf->m_head = NULL;
3220 }
3221 }
3222 return (ENOBUFS);
3223}
3224
3225/*********************************************************************
3226 *
3227 * Initialize all receive rings.
3228 *
3229 **********************************************************************/
3230static int
3231ixgbe_setup_receive_structures(struct adapter *adapter)
3232{
3233 struct rx_ring *rxr = adapter->rx_rings;
3234 int j;
3235
3236 for (j = 0; j < adapter->num_rx_queues; j++, rxr++)
3237 if (ixgbe_setup_receive_ring(rxr))
3238 goto fail;
3239
3240 return (0);
3241fail:
3242 /*
3243 * Free RX buffers allocated so far, we will only handle
3244 * the rings that completed, the failing case will have
3245 	 * cleaned up for itself. 'j' failed, so it's the terminus.
3246 */
3247 for (int i = 0; i < j; ++i) {
3248 rxr = &adapter->rx_rings[i];
3249 for (int n = 0; n < adapter->num_rx_desc; n++) {
3250 struct ixgbe_rx_buf *rxbuf;
3251 rxbuf = &rxr->rx_buffers[n];
3252 if (rxbuf->m_head != NULL) {
3253 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3254 BUS_DMASYNC_POSTREAD);
3255 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3256 m_freem(rxbuf->m_head);
3257 rxbuf->m_head = NULL;
3258 }
3259 }
3260 }
3261
3262 return (ENOBUFS);
3263}
3264
3265/*********************************************************************
3266 *
3267 * Setup receive registers and features.
3268 *
3269 **********************************************************************/
3270#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3271
3272static void
3273ixgbe_initialize_receive_units(struct adapter *adapter)
3274{
3275 struct rx_ring *rxr = adapter->rx_rings;
3276 struct ixgbe_hw *hw = &adapter->hw;
3277 struct ifnet *ifp = adapter->ifp;
3278 u32 rxctrl, fctrl, srrctl, rxcsum;
3279 u32 reta, mrqc = 0, hlreg, random[10];
3280
3281
3282 /*
3283 * Make sure receives are disabled while
3284 * setting up the descriptor ring
3285 */
3286 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3287 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3288 rxctrl & ~IXGBE_RXCTRL_RXEN);
3289
3290 /* Enable broadcasts */
3291 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3292 fctrl |= IXGBE_FCTRL_BAM;
3293 fctrl |= IXGBE_FCTRL_DPF;
3294 fctrl |= IXGBE_FCTRL_PMCF;
3295 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3296
3297 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(0));
3298 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3299 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3300
3301 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3302 /* Set for Jumbo Frames? */
3303 if (ifp->if_mtu > ETHERMTU) {
3304 hlreg |= IXGBE_HLREG0_JUMBOEN;
3305 srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3306 } else {
3307 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3308 srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3309 }
3310 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3311
3312 if (ixgbe_rx_hdr_split) {
3313 /* Use a standard mbuf for the header */
3314 srrctl |= ((IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3315 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3316 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3317 } else
3318 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3319
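	/*
	** DESCTYPE above selects between header-split descriptors
	** (header and packet buffers posted separately) and the
	** single-buffer advanced descriptor layout.
	*/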
3320 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(0), srrctl);
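	/*
	 * Worked example of the SRRCTL sizing above (illustrative only;
	 * it assumes the usual field layout where BSIZEPKT is expressed
	 * in 1 KB units via IXGBE_SRRCTL_BSIZEPKT_SHIFT and BSIZEHDR in
	 * 64-byte units):
	 *
	 *   2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT -> 2  (2 KB packet buffer)
	 *   4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT -> 4  (4 KB, jumbo case)
	 *   IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT
	 *                                       -> header buffer size,
	 *                                          encoded in 64-byte units
	 */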
3321
3322 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3323 u64 rdba = rxr->rxdma.dma_paddr;
3324 /* Setup the Base and Length of the Rx Descriptor Ring */
3135 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(i),
3325 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3326 (rdba & 0x00000000ffffffffULL));
3137 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(i), (rdba >> 32));
3138 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(i),
3327 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3328 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3329 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3330
3331 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3142 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(i), 0);
3143 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(i),
3144 adapter->num_rx_desc - 1);
3332 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3333 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3334 }
3335
3147 rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
3336 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3337
3338 /* Setup RSS */
3339 if (adapter->num_rx_queues > 1) {
3340 int i, j;
3341 reta = 0;
3342
3343 /* set up random bits */
3344 arc4rand(&random, sizeof(random), 0);
3345
3153 /* Create reta data */
3154 for (i = 0; i < 128; )
3155 for (j = 0; j < adapter->num_rx_queues &&
3156 i < 128; j++, i++)
3157 reta.c[i] = j;
3158
3346 /* Set up the redirection table */
3160 for (i = 0; i < 32; i++)
3161 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta.i[i]);
3347 for (i = 0, j = 0; i < 128; i++, j++) {
3348 if (j == adapter->num_rx_queues) j = 0;
3349 reta = (reta << 8) | (j * 0x11);
3350 if ((i & 3) == 3)
3351 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3352 }
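	/*
	 * Illustration of the packing done by the loop above (not extra
	 * driver logic): with num_rx_queues == 4 the per-entry bytes are
	 * 0x00, 0x11, 0x22, 0x33, 0x00, ... -- multiplying by 0x11
	 * replicates the queue index into both nibbles of the byte.
	 * After every fourth entry the accumulated 32-bit value is
	 * written, so RETA(0) receives 0x00112233, RETA(1) the next four
	 * entries, and so on until all 128 entries (32 registers) are
	 * filled.
	 */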
3353
3354 /* Now fill our hash function seeds */
3355 for (int i = 0; i < 10; i++)
3165 IXGBE_WRITE_REG_ARRAY(&adapter->hw,
3166 IXGBE_RSSRK(0), i, random[i]);
3356 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3357
3168 mrqc = IXGBE_MRQC_RSSEN
3169 /* Perform hash on these packet types */
3170 | IXGBE_MRQC_RSS_FIELD_IPV4
3171 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3172 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3173 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3174 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3175 | IXGBE_MRQC_RSS_FIELD_IPV6
3176 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3177 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3178 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3179 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc);
3358 /* Perform hash on these packet types */
3359 mrqc |= IXGBE_MRQC_RSSEN
3360 | IXGBE_MRQC_RSS_FIELD_IPV4
3361 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3362 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3363 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3364 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3365 | IXGBE_MRQC_RSS_FIELD_IPV6
3366 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3367 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3368 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3369 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3370
3371 /* RSS and RX IPP Checksum are mutually exclusive */
3372 rxcsum |= IXGBE_RXCSUM_PCSD;
3373 }
3374
3375 if (ifp->if_capenable & IFCAP_RXCSUM)
3376 rxcsum |= IXGBE_RXCSUM_PCSD;
3377
3378 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3379 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3380
3191 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
3381 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3382
3193 /* Enable Receive engine */
3194 rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
3195 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl);
3196
3383 return;
3384}
3385
3386/*********************************************************************
3387 *
3388 * Free all receive rings.
3389 *
3390 **********************************************************************/
3391static void
3392ixgbe_free_receive_structures(struct adapter *adapter)
3393{
3394 struct rx_ring *rxr = adapter->rx_rings;
3395
3396 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3397 struct lro_ctrl *lro = &rxr->lro;
3398 ixgbe_free_receive_buffers(rxr);
3399 /* Free LRO memory */
3400 tcp_lro_free(lro);
3401 /* Free the ring memory as well */
3402 ixgbe_dma_free(adapter, &rxr->rxdma);
3403 }
3404
3405 free(adapter->rx_rings, M_DEVBUF);
3406}
3407
3408/*********************************************************************
3409 *
3410 * Free receive ring data structures
3411 *
3412 **********************************************************************/
3413void
3414ixgbe_free_receive_buffers(struct rx_ring *rxr)
3415{
3416 struct adapter *adapter = NULL;
3417 struct ixgbe_rx_buf *rxbuf = NULL;
3418
3419 INIT_DEBUGOUT("free_receive_buffers: begin");
3420 adapter = rxr->adapter;
3421 if (rxr->rx_buffers != NULL) {
3422 rxbuf = &rxr->rx_buffers[0];
3423 for (int i = 0; i < adapter->num_rx_desc; i++) {
3238 int s = rxbuf->bigbuf;
3424 if (rxbuf->map != NULL) {
3240 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
3241 bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
3425 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3426 BUS_DMASYNC_POSTREAD);
3427 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3428 bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
3429 }
3430 if (rxbuf->m_head != NULL) {
3431 m_freem(rxbuf->m_head);
3432 }
3433 rxbuf->m_head = NULL;
3434 ++rxbuf;
3435 }
3436 }
3437 if (rxr->rx_buffers != NULL) {
3438 free(rxr->rx_buffers, M_DEVBUF);
3439 rxr->rx_buffers = NULL;
3440 }
3254 for (int s = 0; s < 2; s++) {
3255 if (rxr->rxtag[s] != NULL) {
3256 bus_dma_tag_destroy(rxr->rxtag[s]);
3257 rxr->rxtag[s] = NULL;
3258 }
3441 if (rxr->rxtag != NULL) {
3442 bus_dma_tag_destroy(rxr->rxtag);
3443 rxr->rxtag = NULL;
3444 }
3445 return;
3446}
3447
3448/*********************************************************************
3449 *
3450 * This routine executes in interrupt context. It replenishes
3451 * the mbufs in the descriptor and sends data which has been
3452 * dma'ed into host memory to upper layer.
3453 *
3454 * We loop at most count times if count is > 0, or until done if
3455 * count < 0.
3456 *
3457 * Return TRUE for more work, FALSE for all clean.
3458 *********************************************************************/
3459static bool
3460ixgbe_rxeof(struct rx_ring *rxr, int count)
3461{
3462 struct adapter *adapter = rxr->adapter;
3463 struct ifnet *ifp = adapter->ifp;
3464 struct lro_ctrl *lro = &rxr->lro;
3465 struct lro_entry *queued;
3280 struct mbuf *mp;
3281 int len, i, eop = 0;
3282 u8 accept_frame = 0;
3283 u32 staterr;
3466 int i;
3467 u32 staterr;
3468 union ixgbe_adv_rx_desc *cur;
3469
3470
3471 IXGBE_RX_LOCK(rxr);
3472 i = rxr->next_to_check;
3473 cur = &rxr->rx_base[i];
3474 staterr = cur->wb.upper.status_error;
3475
3476 if (!(staterr & IXGBE_RXD_STAT_DD)) {
3477 IXGBE_RX_UNLOCK(rxr);
3478 return FALSE;
3479 }
3480
3481 /* Sync the ring */
3482 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3483 BUS_DMASYNC_POSTREAD);
3484
3485 while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3486 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3299 struct mbuf *m = NULL;
3300 int s;
3487 struct mbuf *sendmp, *mh, *mp;
3488 u16 hlen, plen, hdr;
3489 u8 dopayload, accept_frame, eop;
3490
3302 mp = rxr->rx_buffers[i].m_head;
3303 s = rxr->rx_buffers[i].bigbuf;
3304 bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
3305 BUS_DMASYNC_POSTREAD);
3491
3492 accept_frame = 1;
3493 hlen = plen = 0;
3494 sendmp = mh = mp = NULL;
3495
3496 /* Sync the buffers */
3497 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
3498 BUS_DMASYNC_POSTREAD);
3499
3500 /*
3501 ** The way the hardware is configured to
3502 ** split, it will ONLY use the header buffer
3503 ** when header split is enabled; otherwise we
3504 ** get normal behavior, i.e., both header and
3505 ** payload are DMA'd into the payload buffer.
3506 **
3507 ** The fmp test catches the case where a
3508 ** packet spans multiple descriptors; in that
3509 ** case only the first header is valid.
3510 */
3511 if ((ixgbe_rx_hdr_split) && (rxr->fmp == NULL)) {
3512 hdr = le16toh(cur->
3513 wb.lower.lo_dword.hs_rss.hdr_info);
3514 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3515 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3516 if (hlen > IXGBE_RX_HDR)
3517 hlen = IXGBE_RX_HDR;
3518 plen = le16toh(cur->wb.upper.length);
3519 /* Handle the header mbuf */
3520 mh = rxr->rx_buffers[i].m_head;
3521 mh->m_len = hlen;
3522 dopayload = IXGBE_CLEAN_HDR;
3523 /*
3524 ** Get the payload length, this
3525 ** could be zero if its a small
3526 ** packet.
3527 */
3528 if (plen) {
3529 mp = rxr->rx_buffers[i].m_pack;
3530 mp->m_len = plen;
3531 mp->m_next = NULL;
3532 mp->m_flags &= ~M_PKTHDR;
3533 mh->m_next = mp;
3534 mh->m_flags |= M_PKTHDR;
3535 dopayload = IXGBE_CLEAN_ALL;
3536 rxr->rx_split_packets++;
3537 } else { /* small packets */
3538 mh->m_flags &= ~M_PKTHDR;
3539 mh->m_next = NULL;
3540 }
3541 } else {
3542 /*
3543 ** Either no header split, or a
3544 ** secondary piece of a fragmented
3545 ** split packet.
3546 */
3547 mh = rxr->rx_buffers[i].m_pack;
3548 mh->m_flags |= M_PKTHDR;
3549 mh->m_len = le16toh(cur->wb.upper.length);
3550 dopayload = IXGBE_CLEAN_PKT;
3551 }
3552
3553 if (staterr & IXGBE_RXD_STAT_EOP) {
3554 count--;
3555 eop = 1;
3310 } else {
3556 } else
3557 eop = 0;
3312 }
3313 len = cur->wb.upper.length;
3558
3559 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3560 accept_frame = 0;
3561
3562 if (accept_frame) {
3319 /* Get a fresh buffer first */
3320 if (ixgbe_get_buf(rxr, i) != 0) {
3563 if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
3564 ifp->if_iqdrops++;
3565 goto discard;
3566 }
3324
3325 /* Assign correct length to the current fragment */
3326 mp->m_len = len;
3327
3567 /* Initial frame - setup */
3568 if (rxr->fmp == NULL) {
3329 mp->m_pkthdr.len = len;
3330 rxr->fmp = mp; /* Store the first mbuf */
3331 rxr->lmp = mp;
3569 mh->m_flags |= M_PKTHDR;
3570 mh->m_pkthdr.len = mh->m_len;
3571 rxr->fmp = mh; /* Store the first mbuf */
3572 rxr->lmp = mh;
3573 if (mp) { /* Add payload if split */
3574 mh->m_pkthdr.len += mp->m_len;
3575 rxr->lmp = mh->m_next;
3576 }
3577 } else {
3578 /* Chain mbuf's together */
3334 mp->m_flags &= ~M_PKTHDR;
3335 rxr->lmp->m_next = mp;
3579 mh->m_flags &= ~M_PKTHDR;
3580 rxr->lmp->m_next = mh;
3581 rxr->lmp = rxr->lmp->m_next;
3337 rxr->fmp->m_pkthdr.len += len;
3582 rxr->fmp->m_pkthdr.len += mh->m_len;
3583 }
3584
3585 if (eop) {
3586 rxr->fmp->m_pkthdr.rcvif = ifp;
3587 ifp->if_ipackets++;
3343 rxr->packet_count++;
3344 rxr->byte_count += rxr->fmp->m_pkthdr.len;
3345
3346 ixgbe_rx_checksum(adapter,
3347 staterr, rxr->fmp);
3348
3588 rxr->rx_packets++;
3589 /* capture data for AIM */
3590 rxr->bytes += rxr->fmp->m_pkthdr.len;
3591 rxr->rx_bytes += rxr->bytes;
3592 if (ifp->if_capenable & IFCAP_RXCSUM)
3593 ixgbe_rx_checksum(staterr, rxr->fmp);
3594 else
3595 rxr->fmp->m_pkthdr.csum_flags = 0;
3596 if (staterr & IXGBE_RXD_STAT_VP) {
3350#if __FreeBSD_version < 700000
3351 VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
3352 (le16toh(cur->wb.upper.vlan) &
3353 IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
3354#else
3597 rxr->fmp->m_pkthdr.ether_vtag =
3356 le16toh(cur->wb.upper.vlan);
3357 rxr->fmp->m_flags |= M_VLANTAG;
3358#endif
3598 le16toh(cur->wb.upper.vlan);
3599 rxr->fmp->m_flags |= M_VLANTAG;
3600 }
3360 m = rxr->fmp;
3601 sendmp = rxr->fmp;
3602 rxr->fmp = NULL;
3603 rxr->lmp = NULL;
3604 }
3605 } else {
3606 ifp->if_ierrors++;
3607discard:
3608 /* Reuse loaded DMA map and just update mbuf chain */
3368 mp = rxr->rx_buffers[i].m_head;
3369 mp->m_len = mp->m_pkthdr.len =
3370 (rxr->rx_buffers[i].bigbuf ? MJUMPAGESIZE:MCLBYTES);
3609 if (hlen) {
3610 mh = rxr->rx_buffers[i].m_head;
3611 mh->m_len = MHLEN;
3612 mh->m_next = NULL;
3613 }
3614 mp = rxr->rx_buffers[i].m_pack;
3615 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
3616 mp->m_data = mp->m_ext.ext_buf;
3617 mp->m_next = NULL;
3373 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3618 if (adapter->max_frame_size <=
3619 (MCLBYTES - ETHER_ALIGN))
3620 m_adj(mp, ETHER_ALIGN);
3621 if (rxr->fmp != NULL) {
3622 /* handles the whole chain */
3623 m_freem(rxr->fmp);
3624 rxr->fmp = NULL;
3625 rxr->lmp = NULL;
3626 }
3380 m = NULL;
3627 sendmp = NULL;
3628 }
3382
3383 /* Zero out the receive descriptors status */
3384 cur->wb.upper.status_error = 0;
3629 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3630 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3631
3632 rxr->last_cleaned = i; /* for updating tail */
3633
3634 if (++i == adapter->num_rx_desc)
3635 i = 0;
3636
3393 /* Now send up to the stack */
3394 if (m != NULL) {
3395 rxr->next_to_check = i;
3637 /*
3638 ** Now send up to the stack,
3639 ** note that the value of next_to_check
3640 ** is safe because we keep the RX lock
3641 ** thru this call.
3642 */
3643 if (sendmp != NULL) {
3644 /* Use LRO if possible */
3397 if ((!lro->lro_cnt) || (tcp_lro_rx(lro, m, 0))) {
3398 IXGBE_RX_UNLOCK(rxr);
3399 (*ifp->if_input)(ifp, m);
3400 IXGBE_RX_LOCK(rxr);
3401 i = rxr->next_to_check;
3402 }
3645 if ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0)))
3646 (*ifp->if_input)(ifp, sendmp);
3647 }
3648
3649 /* Get next descriptor */
3650 cur = &rxr->rx_base[i];
3651 staterr = cur->wb.upper.status_error;
3652 }
3653 rxr->next_to_check = i;
3654
3655 /* Advance the hardware's Receive Queue "Tail Pointer" */
3656 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
3412 IXGBE_RX_UNLOCK(rxr);
3657
3658 /*
3415 ** Flush any outstanding LRO work
3416 ** this may call into the stack and
3417 ** must not hold a driver lock.
3418 */
3419 while(!SLIST_EMPTY(&lro->lro_active)) {
3659 * Flush any outstanding LRO work
3660 */
3661 while (!SLIST_EMPTY(&lro->lro_active)) {
3662 queued = SLIST_FIRST(&lro->lro_active);
3663 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3664 tcp_lro_flush(lro, queued);
3665 }
3666
3425 if (!(staterr & IXGBE_RXD_STAT_DD))
3426 return FALSE;
3667 IXGBE_RX_UNLOCK(rxr);
3668
3428 return TRUE;
3669 /*
3670 ** Leaving with more to clean?
3671 ** then schedule another interrupt.
3672 */
3673 if (staterr & IXGBE_RXD_STAT_DD) {
3674 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rxr->eims);
3675 return TRUE;
3676 }
3677
3678 return FALSE;
3679}
3680
3681/*********************************************************************
3682 *
3683 * Verify that the hardware indicated that the checksum is valid.
3684 * Inform the stack about the status of checksum so that stack
3685 * doesn't spend time verifying the checksum.
3686 *
3687 *********************************************************************/
3688static void
3439ixgbe_rx_checksum(struct adapter *adapter,
3440 u32 staterr, struct mbuf * mp)
3689ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
3690{
3442 struct ifnet *ifp = adapter->ifp;
3691 u16 status = (u16) staterr;
3692 u8 errors = (u8) (staterr >> 24);
3693
3446 /* Not offloading */
3447 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
3448 mp->m_pkthdr.csum_flags = 0;
3449 return;
3450 }
3451
3694 if (status & IXGBE_RXD_STAT_IPCS) {
3695 /* Did it pass? */
3696 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3697 /* IP Checksum Good */
3698 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3699 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3700
3701 } else
3702 mp->m_pkthdr.csum_flags = 0;
3703 }
3704 if (status & IXGBE_RXD_STAT_L4CS) {
3705 /* Did it pass? */
3706 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3707 mp->m_pkthdr.csum_flags |=
3708 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3709 mp->m_pkthdr.csum_data = htons(0xffff);
3710 }
3711 }
3712 return;
3713}
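/*
 * Net effect of the function above for a clean TCP/IPv4 frame (a
 * summary of the flag combinations it sets, not additional logic):
 *
 *   IPCS set, no IPE  -> csum_flags  = CSUM_IP_CHECKED | CSUM_IP_VALID
 *   L4CS set, no TCPE -> csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR
 *                        csum_data   = 0xffff
 *
 * so the stack can skip both the IP header and the TCP/UDP checksum
 * verification for such packets.
 */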
3714
3473#ifdef IXGBE_VLAN_EVENTS
3715
3716#ifdef IXGBE_HW_VLAN_SUPPORT
3717/*
3718 * This routine is run via a vlan
3719 * config EVENT
3720 */
3721static void
3722ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
3723{
3724 struct adapter *adapter = ifp->if_softc;
3482 u32 ctrl;
3725 u32 ctrl;
3726
3727 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3728 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
3729 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3730 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3731
3732 /* Make entry in the hardware filter table */
3733 ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
3734}
3735
3736/*
3737 * This routine is run via a vlan
3738 * unconfig EVENT
3739 */
3740static void
3741ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
3742{
3743 struct adapter *adapter = ifp->if_softc;
3744
3745
3746 /* Remove entry in the hardware filter table */
3747 ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
3748
3749 /* Have all vlans unregistered? */
3750 if (adapter->ifp->if_vlantrunk == NULL) {
3751 u32 ctrl;
3752 /* Turn off the filter table */
3753 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3754 ctrl &= ~IXGBE_VLNCTRL_VME;
3755 ctrl &= ~IXGBE_VLNCTRL_VFE;
3756 ctrl |= IXGBE_VLNCTRL_CFIEN;
3757 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3758 }
3759}
3516#endif /* IXGBE_VLAN_EVENTS */
3760#endif
3761
3762static void
3763ixgbe_enable_intr(struct adapter *adapter)
3764{
3765 struct ixgbe_hw *hw = &adapter->hw;
3766 u32 mask = IXGBE_EIMS_ENABLE_MASK;
3767
3768 /* Enable Fan Failure detection */
3769 if (hw->phy.media_type == ixgbe_media_type_copper)
3770 mask |= IXGBE_EIMS_GPI_SDP1;
3771
3772 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3773
3774 /* With RSS we use auto clear */
3775 if (adapter->msix_mem) {
3776 /* Don't autoclear Link */
3777 mask &= ~IXGBE_EIMS_OTHER;
3778 mask &= ~IXGBE_EIMS_LSC;
3532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
3533 adapter->eims_mask | mask);
3779 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3780 }
3781
3536 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3782 IXGBE_WRITE_FLUSH(hw);
3783
3784 return;
3785}
3786
3787static void
3788ixgbe_disable_intr(struct adapter *adapter)
3789{
3790 if (adapter->msix_mem)
3791 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3792 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3793 IXGBE_WRITE_FLUSH(&adapter->hw);
3794 return;
3795}
3796
3797u16
3798ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
3799{
3800 u16 value;
3801
3802 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
3803 reg, 2);
3804
3805 return (value);
3806}
3807
3808/*
3809** Setup the correct IVAR register for a particular MSIX interrupt
3810** (yes this is all very magic and confusing :)
3811** - entry is the register array entry
3812** - vector is the MSIX vector for this queue
3813** - type is RX/TX/MISC
3814*/
3815static void
3564ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector)
3816ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector, s8 type)
3817{
3818 struct ixgbe_hw *hw = &adapter->hw;
3819 u32 ivar, index;
3820
3821 vector |= IXGBE_IVAR_ALLOC_VAL;
3569 index = (entry >> 2) & 0x1F;
3570 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
3571 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3572 ivar |= (vector << (8 * (entry & 0x3)));
3573 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3822
3823 switch (hw->mac.type) {
3824
3825 case ixgbe_mac_82598EB:
3826 if (type == -1)
3827 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3828 else
3829 entry += (type * 64);
3830 index = (entry >> 2) & 0x1F;
3831 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3832 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3833 ivar |= (vector << (8 * (entry & 0x3)));
3834 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3835 break;
3836
3837 default:
3838 break;
3839 }
3840}
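/*
 * Worked example of the 82598 mapping above (illustrative; the 0x80
 * value assumed for IXGBE_IVAR_ALLOC_VAL is not defined here):
 * binding TX queue 2 (type 1) to MSIX vector 3 gives
 *
 *   entry = 2 + (1 * 64)      = 66
 *   index = (66 >> 2) & 0x1F  = 16   -> IVAR(16)
 *   byte  = 66 & 0x3          = 2    -> bits 23:16 of that register
 *   value = 3 | IXGBE_IVAR_ALLOC_VAL = 0x83 placed in that byte
 */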
3841
3842static void
3843ixgbe_configure_ivars(struct adapter *adapter)
3844{
3845 struct tx_ring *txr = adapter->tx_rings;
3846 struct rx_ring *rxr = adapter->rx_rings;
3847
3582 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3583 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
3584 adapter->eims_mask |= rxr->eims;
3585 }
3848 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++)
3849 ixgbe_set_ivar(adapter, i, rxr->msix, 0);
3850
3587 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
3588 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
3589 adapter->eims_mask |= txr->eims;
3590 }
3851 for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
3852 ixgbe_set_ivar(adapter, i, txr->msix, 1);
3853
3854 /* For the Link interrupt */
3593 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
3594 adapter->linkvec);
3595 adapter->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
3855 ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
3856}
3857
3858/*
3859** ixgbe_sfp_probe - called in the local timer to
3860** determine if a port had optics inserted.
3861*/
3862static bool ixgbe_sfp_probe(struct adapter *adapter)
3863{
3864 struct ixgbe_hw *hw = &adapter->hw;
3865 device_t dev = adapter->dev;
3866 bool result = FALSE;
3867
3868 if ((hw->phy.type == ixgbe_phy_nl) &&
3869 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3870 s32 ret = hw->phy.ops.identify_sfp(hw);
3871 if (ret)
3872 goto out;
3873 ret = hw->phy.ops.reset(hw);
3874 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3875 device_printf(dev,"Unsupported SFP+ module detected!");
3876 printf(" Reload driver with a supported module.\n");
3877 adapter->sfp_probe = FALSE;
3878 goto out;
3879 } else
3880 device_printf(dev,"SFP+ module detected!\n");
3881 /* We now have supported optics */
3882 adapter->sfp_probe = FALSE;
3883 result = TRUE;
3884 }
3885out:
3886 return (result);
3887}
3888
3889
3890/**********************************************************************
3891 *
3892 * Update the board statistics counters.
3893 *
3894 **********************************************************************/
3895static void
3896ixgbe_update_stats_counters(struct adapter *adapter)
3897{
3898 struct ifnet *ifp = adapter->ifp;
3899 struct ixgbe_hw *hw = &adapter->hw;
3900 u32 missed_rx = 0, bprc, lxon, lxoff, total;
3901
3902 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3903
3904 for (int i = 0; i < 8; i++) {
3905 int mp;
3906 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3907 missed_rx += mp;
3908 adapter->stats.mpc[i] += mp;
3909 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3910 }
3911
3912 /* Hardware workaround, gprc counts missed packets */
3913 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3914 adapter->stats.gprc -= missed_rx;
3915
3916 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3917 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3918 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3919
3920 /*
3921 * Workaround: mprc hardware is incorrectly counting
3922 * broadcasts, so for now we subtract those.
3923 */
3924 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3925 adapter->stats.bprc += bprc;
3926 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3927 adapter->stats.mprc -= bprc;
3928
3929 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3930 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3931 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3932 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3933 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3934 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3935 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3936 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3937
3646 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3647 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3648
3938 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3939 adapter->stats.lxontxc += lxon;
3940 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3941 adapter->stats.lxofftxc += lxoff;
3942 total = lxon + lxoff;
3943
3944 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3945 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3946 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3947 adapter->stats.gptc -= total;
3948 adapter->stats.mptc -= total;
3949 adapter->stats.ptc64 -= total;
3950 adapter->stats.gotc -= total * ETHER_MIN_LEN;
3951
3952 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3953 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3954 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3955 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3956 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3957 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3958 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3959 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3960 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3961 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3962
3963
3964 /* Fill out the OS statistics structure */
3965 ifp->if_ipackets = adapter->stats.gprc;
3966 ifp->if_opackets = adapter->stats.gptc;
3967 ifp->if_ibytes = adapter->stats.gorc;
3968 ifp->if_obytes = adapter->stats.gotc;
3969 ifp->if_imcasts = adapter->stats.mprc;
3970 ifp->if_collisions = 0;
3971
3972 /* Rx Errors */
3973 ifp->if_ierrors = missed_rx + adapter->stats.crcerrs +
3974 adapter->stats.rlec;
3975}
3976
3977
3978/**********************************************************************
3979 *
3980 * This routine is called only when ixgbe_display_debug_stats is enabled.
3981 * This routine provides a way to take a look at important statistics
3982 * maintained by the driver and hardware.
3983 *
3984 **********************************************************************/
3985static void
3986ixgbe_print_hw_stats(struct adapter * adapter)
3987{
3988 device_t dev = adapter->dev;
3989
3990
3991 device_printf(dev,"Std Mbuf Failed = %lu\n",
3703 adapter->mbuf_alloc_failed);
3704 device_printf(dev,"Std Cluster Failed = %lu\n",
3705 adapter->mbuf_cluster_failed);
3706
3992 adapter->mbuf_defrag_failed);
3993 device_printf(dev,"Missed Packets = %llu\n",
3994 (long long)adapter->stats.mpc[0]);
3995 device_printf(dev,"Receive length errors = %llu\n",
3996 ((long long)adapter->stats.roc +
3997 (long long)adapter->stats.ruc));
3998 device_printf(dev,"Crc errors = %llu\n",
3999 (long long)adapter->stats.crcerrs);
4000 device_printf(dev,"Driver dropped packets = %lu\n",
4001 adapter->dropped_pkts);
4002 device_printf(dev, "watchdog timeouts = %ld\n",
4003 adapter->watchdog_events);
4004
4005 device_printf(dev,"XON Rcvd = %llu\n",
4006 (long long)adapter->stats.lxonrxc);
4007 device_printf(dev,"XON Xmtd = %llu\n",
4008 (long long)adapter->stats.lxontxc);
4009 device_printf(dev,"XOFF Rcvd = %llu\n",
4010 (long long)adapter->stats.lxoffrxc);
4011 device_printf(dev,"XOFF Xmtd = %llu\n",
4012 (long long)adapter->stats.lxofftxc);
4013
4014 device_printf(dev,"Total Packets Rcvd = %llu\n",
4015 (long long)adapter->stats.tpr);
4016 device_printf(dev,"Good Packets Rcvd = %llu\n",
4017 (long long)adapter->stats.gprc);
4018 device_printf(dev,"Good Packets Xmtd = %llu\n",
4019 (long long)adapter->stats.gptc);
4020 device_printf(dev,"TSO Transmissions = %lu\n",
4021 adapter->tso_tx);
4022
4023 return;
4024}
4025
4026/**********************************************************************
4027 *
4028 * This routine is called only when ixgbe_display_debug_stats is enabled.
4029 * This routine provides a way to take a look at important statistics
4030 * maintained by the driver and hardware.
4031 *
4032 **********************************************************************/
4033static void
4034ixgbe_print_debug_info(struct adapter *adapter)
4035{
4036 device_t dev = adapter->dev;
4037 struct rx_ring *rxr = adapter->rx_rings;
4038 struct tx_ring *txr = adapter->tx_rings;
4039 struct ixgbe_hw *hw = &adapter->hw;
4040
4041 device_printf(dev,"Error Byte Count = %u \n",
4042 IXGBE_READ_REG(hw, IXGBE_ERRBC));
4043
4044 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
4045 struct lro_ctrl *lro = &rxr->lro;
4046 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
4047 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
4048 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
3763 device_printf(dev,"RX(%d) Packets Received: %lu\n",
3764 rxr->me, (long)rxr->packet_count);
4049 device_printf(dev,"RX(%d) Packets Received: %lld\n",
4050 rxr->me, (long long)rxr->rx_packets);
4051 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
4052 rxr->me, (long long)rxr->rx_split_packets);
4053 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3766 rxr->me, (long)rxr->byte_count);
4054 rxr->me, (long)rxr->rx_bytes);
4055 device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
4056 rxr->me, (long)rxr->rx_irq);
4057 device_printf(dev,"RX(%d) LRO Queued= %d\n",
4058 rxr->me, lro->lro_queued);
4059 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
4060 rxr->me, lro->lro_flushed);
4061 }
4062
4063 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
4064 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
4065 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
4066 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
4067 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3780 txr->me, (long)txr->tx_packets);
4068 txr->me, (long)txr->total_packets);
4069 device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
4070 txr->me, (long)txr->tx_irq);
4071 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
4072 txr->me, (long)txr->no_tx_desc_avail);
4073 }
4074
4075 device_printf(dev,"Link IRQ Handled: %lu\n",
4076 (long)adapter->link_irq);
4077 return;
4078}
4079
4080static int
4081ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
4082{
4083 int error;
4084 int result;
4085 struct adapter *adapter;
4086
4087 result = -1;
4088 error = sysctl_handle_int(oidp, &result, 0, req);
4089
4090 if (error || !req->newptr)
4091 return (error);
4092
4093 if (result == 1) {
4094 adapter = (struct adapter *) arg1;
4095 ixgbe_print_hw_stats(adapter);
4096 }
4097 return error;
4098}
4099
4100static int
4101ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
4102{
4103 int error, result;
4104 struct adapter *adapter;
4105
4106 result = -1;
4107 error = sysctl_handle_int(oidp, &result, 0, req);
4108
4109 if (error || !req->newptr)
4110 return (error);
4111
4112 if (result == 1) {
4113 adapter = (struct adapter *) arg1;
4114 ixgbe_print_debug_info(adapter);
4115 }
4116 return error;
4117}
4118
4119/*
4120** Set flow control using sysctl:
4121** Flow control values:
4122** 0 - off
4123** 1 - rx pause
4124** 2 - tx pause
4125** 3 - full
4126*/
4127static int
4128ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4129{
4130 int error;
4131 struct adapter *adapter;
4132
4133 error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
4134
4135 if (error)
4136 return (error);
4137
4138 adapter = (struct adapter *) arg1;
4139 switch (ixgbe_flow_control) {
4140 case ixgbe_fc_rx_pause:
4141 case ixgbe_fc_tx_pause:
4142 case ixgbe_fc_full:
3855 adapter->hw.fc.type = ixgbe_flow_control;
4143 adapter->hw.fc.requested_mode = ixgbe_flow_control;
4144 break;
4145 case ixgbe_fc_none:
4146 default:
3859 adapter->hw.fc.type = ixgbe_fc_none;
4147 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4148 }
4149
4150 ixgbe_setup_fc(&adapter->hw, 0);
4151 return error;
4152}
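/*
 * Usage sketch, assuming the handler above is attached during attach
 * to an OID named "flow_control" under the device's sysctl tree
 * (e.g. dev.ix.0); the exact OID name is an assumption:
 *
 *   # sysctl dev.ix.0.flow_control=3    full (rx + tx) pause
 *   # sysctl dev.ix.0.flow_control=0    flow control off
 *
 * Any value other than 1, 2 or 3 falls through to ixgbe_fc_none.
 */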
4153
4154static void
4155ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
4156 const char *description, int *limit, int value)
4157{
4158 *limit = value;
4159 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4160 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4161 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4162}
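/*
 * A typical call to the helper above, normally made from the attach
 * path; the tunable name and default shown here are assumptions for
 * illustration:
 *
 *	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
 *	    "max packets cleaned per rx interrupt",
 *	    &adapter->rx_process_limit, 100);
 */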
3875
3876#ifndef NO_82598_A0_SUPPORT
3877/*
3878 * A0 Workaround: invert descriptor for hardware
3879 */
3880void
3881desc_flip(void *desc)
3882{
3883 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
3884 struct dhack *d;
3885
3886 d = (struct dhack *)desc;
3887 d->a1 = ~(d->a1);
3888 d->a2 = ~(d->a2);
3889 d->b1 = ~(d->b1);
3890 d->b2 = ~(d->b2);
3891 d->b2 &= 0xFFFFFFF0;
3892 d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
3893}
3894#endif
3895
3896
3897