sys/dev/ixgbe/ixgbe.c: diff of r181003 (old) against r185352 (new)
1/******************************************************************************
2
3 Copyright (c) 2001-2008, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8

--- 16 unchanged lines hidden ---

25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.c 181003 2008-07-30 18:15:18Z jfv $*/
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_device_polling.h"
37#endif
38
39/* Undefine this if not using CURRENT */
40#define IXGBE_VLAN_EVENTS
41
42#include "ixgbe.h"
43
44/*********************************************************************
45 * Set this to one to display debug statistics
46 *********************************************************************/
47int ixgbe_display_debug_stats = 0;
48
49/*********************************************************************
50 * Driver version
51 *********************************************************************/
52char ixgbe_driver_version[] = "1.4.7";
53
54/*********************************************************************
55 * PCI Device ID Table
56 *
57 * Used by probe to select devices to load on
58 * Last field stores an index into ixgbe_strings
59 * Last entry must be all 0s
60 *
61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
63
64static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
65{
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT_DUAL_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
73 /* required last entry */
74 {0, 0, 0, 0, 0}
75};
76
77/*********************************************************************
78 * Table of branding strings
79 *********************************************************************/
80

--- 42 unchanged lines hidden ---

123static void ixgbe_free_receive_structures(struct adapter *);
124static void ixgbe_free_receive_buffers(struct rx_ring *);
125
126static void ixgbe_enable_intr(struct adapter *);
127static void ixgbe_disable_intr(struct adapter *);
128static void ixgbe_update_stats_counters(struct adapter *);
129static bool ixgbe_txeof(struct tx_ring *);
130static bool ixgbe_rxeof(struct rx_ring *, int);
131static void ixgbe_rx_checksum(struct adapter *, u32, struct mbuf *);
132static void ixgbe_set_promisc(struct adapter *);
133static void ixgbe_disable_promisc(struct adapter *);
134static void ixgbe_set_multi(struct adapter *);
135static void ixgbe_print_hw_stats(struct adapter *);
136static void ixgbe_print_debug_info(struct adapter *);
137static void ixgbe_update_link_status(struct adapter *);
138static int ixgbe_get_buf(struct rx_ring *, int);
139static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
140static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
141static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
142static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
143static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
144 struct ixgbe_dma_alloc *, int);
145static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
146static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
147 const char *, int *, int);
148static boolean_t ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
149static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
150static void ixgbe_set_ivar(struct adapter *, u16, u8);
151static void ixgbe_configure_ivars(struct adapter *);
152static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
153
154#ifdef IXGBE_VLAN_EVENTS
155static void ixgbe_register_vlan(void *, struct ifnet *, u16);
156static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
157#endif
158
159/* Legacy (single vector) interrupt handler */
160static void ixgbe_legacy_irq(void *);
161
162/* The MSI/X Interrupt handlers */
163static void ixgbe_msix_tx(void *);
164static void ixgbe_msix_rx(void *);
165static void ixgbe_msix_link(void *);
166
167/* Legacy interrupts use deferred handlers */
168static void ixgbe_handle_tx(void *context, int pending);
169static void ixgbe_handle_rx(void *context, int pending);
170
171#ifndef NO_82598_A0_SUPPORT
172static void desc_flip(void *);
173#endif
174
175/*********************************************************************
176 * FreeBSD Device Interface Entry Points
177 *********************************************************************/
178
179static device_method_t ixgbe_methods[] = {
180 /* Device interface */
181 DEVMETHOD(device_probe, ixgbe_probe),

--- 12 unchanged lines hidden ---

194
195MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
196MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
197
198/*
199** TUNEABLE PARAMETERS:
200*/
201
202/* How many packets rxeof tries to clean at a time */
203static int ixgbe_rx_process_limit = 100;
204TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
205
206/* Flow control setting, default to full */
207static int ixgbe_flow_control = 3;
208TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
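/*
 * With the ixgbe_fc enum ordering (an assumption based on the usual
 * ixgbe_type.h definitions: none = 0, rx_pause = 1, tx_pause = 2,
 * full = 3), the default of 3 above selects full flow control.
 */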
209
210/*
211 * Should the driver do LRO on the RX end?
212 * This can be toggled on the fly, but the
213 * interface must be reset (down/up) for it
214 * to take effect.
215 */
216static int ixgbe_enable_lro = 0;
217TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);
218
219/*
220 * MSIX should be the default for best performance,
221 * but this allows it to be forced off for testing.
222 */
223static int ixgbe_enable_msix = 1;
224TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
225
226/*
227 * Number of TX/RX queues; a setting of 0
228 * autoconfigures to the number of CPUs.
229 */
230static int ixgbe_tx_queues = 1;
231TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
232static int ixgbe_rx_queues = 4;
233TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
234
235/* Number of TX descriptors per ring */
236static int ixgbe_txd = DEFAULT_TXD;
237TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
238
239/* Number of RX descriptors per ring */
240static int ixgbe_rxd = DEFAULT_RXD;
241TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
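/*
 * The tunables above are all read at module load time, so they can
 * be set from /boot/loader.conf; the values below are purely
 * illustrative:
 *
 *   hw.ixgbe.rx_process_limit="256"
 *   hw.ixgbe.flow_control="0"
 *   hw.ixgbe.txd="1024"
 *   hw.ixgbe.rxd="1024"
 */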
242
243/* Total number of interfaces - needed for config sanity check */
244static int ixgbe_total_ports;
245
246/* Optics type of this interface */
247static int ixgbe_optics;
248
249/*********************************************************************
250 * Device identification routine
251 *
252 * ixgbe_probe determines if the driver should be loaded on an
253 * adapter, based on the PCI vendor/device ID of the adapter.
254 *
255 * return 0 on success, positive on failure
256 *********************************************************************/
257
258static int
259ixgbe_probe(device_t dev)
260{
261 ixgbe_vendor_info_t *ent;
262
263 u_int16_t pci_vendor_id = 0;
264 u_int16_t pci_device_id = 0;
265 u_int16_t pci_subvendor_id = 0;
266 u_int16_t pci_subdevice_id = 0;
267 char adapter_name[128];
268
269 INIT_DEBUGOUT("ixgbe_probe: begin");
270
271 pci_vendor_id = pci_get_vendor(dev);
272 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
273 return (ENXIO);
274
275 pci_device_id = pci_get_device(dev);

--- 8 unchanged lines hidden (view full) ---

284 ((pci_subvendor_id == ent->subvendor_id) ||
285 (ent->subvendor_id == 0)) &&
286
287 ((pci_subdevice_id == ent->subdevice_id) ||
288 (ent->subdevice_id == 0))) {
289 sprintf(adapter_name, "%s, Version - %s",
290 ixgbe_strings[ent->index],
291 ixgbe_driver_version);
292 switch (pci_device_id) {
293 case IXGBE_DEV_ID_82598AT_DUAL_PORT :
294 ixgbe_total_ports += 2;
295 break;
296 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
297 ixgbe_optics = IFM_10G_CX4;
298 ixgbe_total_ports += 2;
299 break;
300 case IXGBE_DEV_ID_82598AF_DUAL_PORT :
301 ixgbe_optics = IFM_10G_SR;
302 ixgbe_total_ports += 2;
303 break;
304 case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
305 ixgbe_optics = IFM_10G_SR;
306 ixgbe_total_ports += 1;
307 break;
308 case IXGBE_DEV_ID_82598EB_XF_LR :
309 ixgbe_optics = IFM_10G_LR;
310 ixgbe_total_ports += 1;
311 break;
312 case IXGBE_DEV_ID_82598EB_CX4 :
313 ixgbe_optics = IFM_10G_CX4;
314 ixgbe_total_ports += 1;
315 break;
316 case IXGBE_DEV_ID_82598AT :
317 ixgbe_total_ports += 1; break;
318 default:
319 break;
320 }
321 device_set_desc_copy(dev, adapter_name);
322 return (0);
323 }
324 ent++;
325 }
326
327 return (ENXIO);
328}
329
330/*********************************************************************
331 * Device initialization routine
332 *
333 * The attach entry point is called when the driver is being loaded.
334 * This routine identifies the type of hardware, allocates all resources
335 * and initializes the hardware.
336 *
337 * return 0 on success, positive on failure
338 *********************************************************************/
339
340static int
341ixgbe_attach(device_t dev)
342{
343 struct adapter *adapter;
344 int error = 0;
345 u32 ctrl_ext;
346
347 INIT_DEBUGOUT("ixgbe_attach: begin");
348
349 /* Allocate, clear, and link in our adapter structure */
350 adapter = device_get_softc(dev);
351 adapter->dev = adapter->osdep.dev = dev;
352
353 /* Core Lock Init*/
354 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
355
356 /* SYSCTL APIs */
357 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
358 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
359 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
360 adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");
361
362 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
363 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),

--- 5 unchanged lines hidden ---

369 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
370 adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
371
372 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
373 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
374 OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
375 &ixgbe_enable_lro, 1, "Large Receive Offload");
376
377 /* Set up the timer callout */
378 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
379
380 /* Determine hardware revision */
381 ixgbe_identify_hardware(adapter);
382
383 /* Indicate to RX setup to use Jumbo Clusters */
384 adapter->bigbufs = TRUE;
385
386 /* Do base PCI setup - map BAR0 */
387 if (ixgbe_allocate_pci_resources(adapter)) {
388 device_printf(dev, "Allocation of PCI resources failed\n");
389 error = ENXIO;
390 goto err_out;
391 }
392
393 /* Do descriptor calc and sanity checks */

--- 29 unchanged lines hidden ---

423
424 /* Allocate our TX/RX Queues */
425 if (ixgbe_allocate_queues(adapter)) {
426 error = ENOMEM;
427 goto err_out;
428 }
429
430 /* Initialize the shared code */
431 if (ixgbe_init_shared_code(&adapter->hw)) {
432 device_printf(dev,"Unable to initialize the shared code\n");
433 error = EIO;
434 goto err_late;
435 }
436
437 /* Initialize the hardware */
438 if (ixgbe_hardware_init(adapter)) {
439 device_printf(dev,"Unable to initialize the hardware\n");

--- 14 unchanged lines hidden ---

454 /* Sysctl for limiting the amount of work done in the taskqueue */
455 ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
456 "max number of rx packets to process", &adapter->rx_process_limit,
457 ixgbe_rx_process_limit);
458
459 /* Initialize statistics */
460 ixgbe_update_stats_counters(adapter);
461
462#ifdef IXGBE_VLAN_EVENTS
463 /* Register for VLAN events */
464 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
465 ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
466 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
467 ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
468#endif
469
470 /* let hardware know driver is loaded */
471 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
472 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
473 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
474
475 INIT_DEBUGOUT("ixgbe_attach: end");
476 return (0);
477err_late:

--- 38 unchanged lines hidden ---

516 IXGBE_CORE_LOCK(adapter);
517 ixgbe_stop(adapter);
518 IXGBE_CORE_UNLOCK(adapter);
519
520 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
521 if (txr->tq) {
522 taskqueue_drain(txr->tq, &txr->tx_task);
523 taskqueue_free(txr->tq);
524 txr->tq = NULL;
525 }
526 }
527
528 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
529 if (rxr->tq) {
530 taskqueue_drain(rxr->tq, &rxr->rx_task);
531 taskqueue_free(rxr->tq);
532 rxr->tq = NULL;
533 }
534 }
535
536#ifdef IXGBE_VLAN_EVENTS
537 /* Unregister VLAN events */
538 if (adapter->vlan_attach != NULL)
539 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
540 if (adapter->vlan_detach != NULL)
541 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
542#endif
543
544 /* let hardware know driver is unloading */
545 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
546 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
547 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
548
549 ether_ifdetach(adapter->ifp);
550 callout_drain(&adapter->timer);
551 ixgbe_free_pci_resources(adapter);
552 bus_generic_detach(dev);
553 if_free(adapter->ifp);
554
555 ixgbe_free_transmit_structures(adapter);
556 ixgbe_free_receive_structures(adapter);

--- 286 unchanged lines hidden ---

843 *
844 * return 0 on success, positive on failure
845 **********************************************************************/
846#define IXGBE_MHADD_MFS_SHIFT 16
847
848static void
849ixgbe_init_locked(struct adapter *adapter)
850{
851 struct ifnet *ifp = adapter->ifp;
852 device_t dev = adapter->dev;
853 struct ixgbe_hw *hw;
854 u32 txdctl, rxdctl, mhadd, gpie;
855
856 INIT_DEBUGOUT("ixgbe_init: begin");
857
858 hw = &adapter->hw;
859 mtx_assert(&adapter->core_mtx, MA_OWNED);
860
861 ixgbe_stop(adapter);
862

--- 4 unchanged lines hidden ---

867 adapter->hw.addr_ctrl.rar_used_count = 1;
868
869 /* Initialize the hardware */
870 if (ixgbe_hardware_init(adapter)) {
871 device_printf(dev, "Unable to initialize the hardware\n");
872 return;
873 }
874
875#ifndef IXGBE_VLAN_EVENTS
876 /* With events this is done when a vlan registers */
877 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
878 u32 ctrl;
879 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
880 ctrl |= IXGBE_VLNCTRL_VME;
881 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
882 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
883 }
884#endif
885
886 /* Prepare transmit descriptors and buffers */
887 if (ixgbe_setup_transmit_structures(adapter)) {
888 device_printf(dev,"Could not setup transmit structures\n");
889 ixgbe_stop(adapter);
890 return;
891 }
892
893 ixgbe_initialize_transmit_units(adapter);
894
895 /* Setup Multicast table */
896 ixgbe_set_multi(adapter);
897
898 /*
899 ** If we are resetting to an MTU smaller
900 ** than 2K, drop to small RX buffers
901 */
902 if (adapter->max_frame_size <= MCLBYTES)
903 adapter->bigbufs = FALSE;
904
905 /* Prepare receive descriptors and buffers */
906 if (ixgbe_setup_receive_structures(adapter)) {
907 device_printf(dev,"Could not setup receive structures\n");
908 ixgbe_stop(adapter);
909 return;
910 }
911
912 /* Configure RX settings */
913 ixgbe_initialize_receive_units(adapter);
914
915 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
916 /* Enable Fan Failure Interrupt */
917 if (adapter->hw.phy.media_type == ixgbe_media_type_copper)
918 gpie |= IXGBE_SDP1_GPIEN;
919 if (adapter->msix) {
920 /* Enable Enhanced MSIX mode */
921 gpie |= IXGBE_GPIE_MSIX_MODE;
922 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
923 IXGBE_GPIE_OCD;
924 }
925 IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
926

--- 23 unchanged lines hidden ---

950 }
951
952 for (int i = 0; i < adapter->num_rx_queues; i++) {
953 rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
954 /* PTHRESH set to 32 */
955 rxdctl |= 0x0020;
956 rxdctl |= IXGBE_RXDCTL_ENABLE;
957 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
958 }
959
960 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
961
962 /* Set up MSI/X routing */
963 ixgbe_configure_ivars(adapter);
964
965 ixgbe_enable_intr(adapter);
966
967 /* Now inform the stack we're ready */
968 ifp->if_drv_flags |= IFF_DRV_RUNNING;
969 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
970
971 return;

--- 7 unchanged lines hidden ---

979 IXGBE_CORE_LOCK(adapter);
980 ixgbe_init_locked(adapter);
981 IXGBE_CORE_UNLOCK(adapter);
982 return;
983}
984
985
986/*
987** Legacy Deferred Interrupt Handlers
988*/
989
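/*
** Both handlers below bound their work to MAX_INTR passes of the
** clean routine, so a single busy ring cannot monopolize the
** taskqueue thread; rxeof/txeof return FALSE once the ring is
** quiet, which breaks the loop early.
*/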
990static void
991ixgbe_handle_rx(void *context, int pending)
992{
993 struct rx_ring *rxr = context;
994 struct adapter *adapter = rxr->adapter;
995 u32 loop = 0;
996
997 while (loop++ < MAX_INTR)
998 if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
999 break;
1000}
1001
1002static void
1003ixgbe_handle_tx(void *context, int pending)
1004{
1005 struct tx_ring *txr = context;
1006 struct adapter *adapter = txr->adapter;
1007 struct ifnet *ifp = adapter->ifp;
1008 u32 loop = 0;
1009
1010 IXGBE_TX_LOCK(txr);
1011 while (loop++ < MAX_INTR)
1012 if (ixgbe_txeof(txr) == 0)
1013 break;
1014 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1015 ixgbe_start_locked(txr, ifp);
1016 IXGBE_TX_UNLOCK(txr);
1017}
1018
1019
1020/*********************************************************************
1021 *
1022 * Legacy Interrupt Service routine
1023 *
1024 **********************************************************************/
1025
1026static void
1027ixgbe_legacy_irq(void *arg)
1028{
1029 u32 reg_eicr;
1030 struct adapter *adapter = arg;
1031 struct tx_ring *txr = adapter->tx_rings;
1032 struct rx_ring *rxr = adapter->rx_rings;
1033 struct ixgbe_hw *hw;
1034
1035 hw = &adapter->hw;
1036 reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
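	/*
	 * Reading EICR clears the pending causes on the 82598, so the
	 * register is sampled exactly once; a value of zero means this
	 * (possibly shared) interrupt was not raised by our device.
	 */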
1037 if (reg_eicr == 0)
1038 return;
1039
1040 if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
1041 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1042 if (ixgbe_txeof(txr) != 0)
1043 taskqueue_enqueue(txr->tq, &txr->tx_task);
1044
1045 /* Check for fan failure */
1046 if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1047 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1048 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1049 "REPLACE IMMEDIATELY!!\n");
1050 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
1051 IXGBE_EICR_GPI_SDP1);
1052 }
1053 /* Link status change */
1054 if (reg_eicr & IXGBE_EICR_LSC)
1055 ixgbe_update_link_status(adapter);
1056
1057 return;
1058}
1059
1060
1061/*********************************************************************
1062 *
1063 * MSI-X TX Interrupt Service routine
1064 *
1065 **********************************************************************/
1066
1067static void
1068ixgbe_msix_tx(void *arg)
1069{
1070 struct tx_ring *txr = arg;
1071 struct adapter *adapter = txr->adapter;
1072 u32 loop = 0;
1073
1074 ++txr->tx_irq;
1075 IXGBE_TX_LOCK(txr);
1076 while (loop++ < MAX_INTR)
1077 if (ixgbe_txeof(txr) == 0)
1078 break;
1079 IXGBE_TX_UNLOCK(txr);
1080 /* Reenable this interrupt */
1081 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
1082
1083 return;
1084}
1085
1086/*********************************************************************
1087 *
1088 * MSI-X RX Interrupt Service routine
1089 *
1090 **********************************************************************/
1091
1092static void
1093ixgbe_msix_rx(void *arg)
1094{
1095 struct rx_ring *rxr = arg;
1096 struct adapter *adapter = rxr->adapter;
1097 u32 loop = 0;
1098
1099 ++rxr->rx_irq;
1100 while (loop++ < MAX_INTR)
1101 if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
1102 break;
1103 /* Reenable this interrupt */
1104 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
1105 return;
1106}
1107
1108static void
1109ixgbe_msix_link(void *arg)
1110{
1111 struct adapter *adapter = arg;
1112 struct ixgbe_hw *hw = &adapter->hw;
1113 u32 reg_eicr;
1114
1115 ++adapter->link_irq;
1116

--- 42 unchanged lines hidden ---

1159
1160 ifmr->ifm_status |= IFM_ACTIVE;
1161
1162 switch (adapter->link_speed) {
1163 case IXGBE_LINK_SPEED_1GB_FULL:
1164 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1165 break;
1166 case IXGBE_LINK_SPEED_10GB_FULL:
1167 ifmr->ifm_active |= ixgbe_optics | IFM_FDX;
1168 break;
1169 }
1170
1171 IXGBE_CORE_UNLOCK(adapter);
1172
1173 return;
1174}
1175

--- 39 unchanged lines hidden ---

1215 * return 0 on success, positive on failure
1216 **********************************************************************/
1217
1218static int
1219ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1220{
1221 struct adapter *adapter = txr->adapter;
1222 u32 olinfo_status = 0, cmd_type_len = 0;
1223 u32 paylen;
1224 int i, j, error, nsegs;
1225 int first, last = 0;
1226 struct mbuf *m_head;
1227 bus_dma_segment_t segs[IXGBE_MAX_SCATTER];
1228 bus_dmamap_t map;
1229 struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1230 union ixgbe_adv_tx_desc *txd = NULL;
1231
1232 m_head = *m_headp;
1233 paylen = 0;
1234
1235 /* Basic descriptor defines */
1236 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
1237 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
1238
1239 if (m_head->m_flags & M_VLANTAG)
1240 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1241

--- 27 unchanged lines hidden ---

1269 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1270 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1271
1272 if (error == EFBIG) {
1273 struct mbuf *m;
1274
1275 m = m_defrag(*m_headp, M_DONTWAIT);
1276 if (m == NULL) {
1277 adapter->mbuf_alloc_failed++;
1278 m_freem(*m_headp);
1279 *m_headp = NULL;
1280 return (ENOBUFS);
1281 }
1282 *m_headp = m;
1283
1284 /* Try it again */
1285 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,

--- 35 unchanged lines hidden ---

1321 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1322 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1323 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1324 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1325 ++adapter->tso_tx;
1326 } else if (ixgbe_tx_ctx_setup(txr, m_head))
1327 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1328
1329 i = txr->next_avail_tx_desc;
1330 for (j = 0; j < nsegs; j++) {
1331 bus_size_t seglen;
1332 bus_addr_t segaddr;
1333
1334 txbuf = &txr->tx_buffers[i];
1335 txd = &txr->tx_base[i];
1336 seglen = segs[j].ds_len;

--- 4 unchanged lines hidden (view full) ---

1341 cmd_type_len | seglen);
1342 txd->read.olinfo_status = htole32(olinfo_status);
1343 last = i; /* Next descriptor that will get completed */
1344
1345 if (++i == adapter->num_tx_desc)
1346 i = 0;
1347
1348 txbuf->m_head = NULL;
1349 /*
1350 ** we have to do this inside the loop right now
1351 ** because of the hardware workaround.
1352 */
1353 if (j == (nsegs - 1)) /* Last descriptor gets EOP and RS */
1354 txd->read.cmd_type_len |=
1355 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1356#ifndef NO_82598_A0_SUPPORT
1357 if (adapter->hw.revision_id == 0)
1358 desc_flip(txd);
1359#endif
1360 }
1361
1362 txr->tx_avail -= nsegs;
1363 txr->next_avail_tx_desc = i;
1364
1365 txbuf->m_head = m_head;
1366 txbuf->map = map;
1367 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1368
1369 /* Set the index of the descriptor that will be marked done */
1370 txbuf = &txr->tx_buffers[first];
1371
1372 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1373 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1374 /*
1375 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1376 * hardware that this frame is available to transmit.
1377 */
1378 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1379 ++txr->tx_packets;
1380 return (0);
1381
1382xmit_fail:
1383 bus_dmamap_unload(txr->txtag, txbuf->map);
1384 return (error);
1385
1386}
1387

--- 111 unchanged lines hidden ---

1499static void
1500ixgbe_local_timer(void *arg)
1501{
1502 struct adapter *adapter = arg;
1503 struct ifnet *ifp = adapter->ifp;
1504
1505 mtx_assert(&adapter->core_mtx, MA_OWNED);
1506
1507 ixgbe_update_link_status(adapter);
1508 ixgbe_update_stats_counters(adapter);
1509 if (ixgbe_display_debug_stats && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1510 ixgbe_print_hw_stats(adapter);
1511 }
1512 /*
1513 * Each second we check the watchdog
1514 * to protect against hardware hangs.
1515 */
1516 ixgbe_watchdog(adapter);
1517
1518 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1519}
1520
1521static void
1522ixgbe_update_link_status(struct adapter *adapter)
1523{
1524 boolean_t link_up = FALSE;
1525 struct ifnet *ifp = adapter->ifp;

--- 170 unchanged lines hidden ---

1696 ixgbe_msix_tx, txr, &adapter->tag[vector]);
1697 if (error) {
1698 adapter->res[vector] = NULL;
1699 device_printf(dev, "Failed to register TX handler");
1700 return (error);
1701 }
1702 txr->msix = vector;
1703 txr->eims = IXGBE_IVAR_TX_QUEUE(vector);
1704 }
1705
1706 /* RX setup */
1707 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
1708 adapter->res[vector] = bus_alloc_resource_any(dev,
1709 SYS_RES_IRQ, &adapter->rid[vector],
1710 RF_SHAREABLE | RF_ACTIVE);
1711 if (!adapter->res[vector]) {

--- 8 unchanged lines hidden ---

1720 rxr, &adapter->tag[vector]);
1721 if (error) {
1722 adapter->res[vector] = NULL;
1723 device_printf(dev, "Failed to register RX handler");
1724 return (error);
1725 }
1726 rxr->msix = vector;
1727 rxr->eims = IXGBE_IVAR_RX_QUEUE(vector);
1728 }
1729
1730 /* Now for Link changes */
1731 adapter->res[vector] = bus_alloc_resource_any(dev,
1732 SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
1733 if (!adapter->res[vector]) {
1734 device_printf(dev,"Unable to allocate"
1735 " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);

--- 18 unchanged lines hidden ---

1754 * Setup Either MSI/X or MSI
1755 */
1756static int
1757ixgbe_setup_msix(struct adapter *adapter)
1758{
1759 device_t dev = adapter->dev;
1760 int rid, want, queues, msgs;
1761
1762 /* First try MSI/X */
1763 rid = PCIR_BAR(IXGBE_MSIX_BAR);
1764 adapter->msix_mem = bus_alloc_resource_any(dev,
1765 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1766 if (!adapter->msix_mem) {
1767 /* May not be enabled */
1768 device_printf(adapter->dev,
1769 "Unable to map MSIX table \n");
1770 goto msi;
1771 }
1772
1773 msgs = pci_msix_count(dev);
1774 if (msgs == 0) { /* system has msix disabled */
1775 bus_release_resource(dev, SYS_RES_MEMORY,
1776 PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
1777 adapter->msix_mem = NULL;
1778 goto msi;
1779 }
1780
1781 /* Figure out a reasonable auto config value */
1782 queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
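 /*
  * A sketch of the arithmetic above: one vector is reserved for
  * link events and the remainder split between TX and RX, capped
  * at the CPU count; e.g. 16 messages on an 8-CPU box would give
  * (16 - 1) / 2 = 7 queue pairs.
  */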
1783
1784 if (ixgbe_tx_queues == 0)

--- 63 unchanged lines hidden ---

1848
1849 adapter->hw.back = &adapter->osdep;
1850 return (0);
1851}
1852
1853static void
1854ixgbe_free_pci_resources(struct adapter * adapter)
1855{
1856 device_t dev = adapter->dev;
1857
1858 /*
1859 * Legacy has this set to 0, but we need
1860 * to run this once, so reset it.
1861 */
1862 if (adapter->msix == 0)
1863 adapter->msix = 1;
1864
1865 /*
1866 * First release all the interrupt resources:
1867 * notice that since these are just kept
1868 * in an array we can do the same logic
1869 * whether it's MSIX or just legacy.
1870 */
1871 for (int i = 0; i < adapter->msix; i++) {
1872 if (adapter->tag[i] != NULL) {

--- 7 unchanged lines hidden ---

1880 }
1881 }
1882
1883 if (adapter->msix)
1884 pci_release_msi(dev);
1885
1886 if (adapter->msix_mem != NULL)
1887 bus_release_resource(dev, SYS_RES_MEMORY,
1888 PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
1889
1890 if (adapter->pci_mem != NULL)
1891 bus_release_resource(dev, SYS_RES_MEMORY,
1892 PCIR_BAR(0), adapter->pci_mem);
1893
1894 return;
1895}
1896

--- 18 unchanged lines hidden ---

1915
1916 /* Make sure we have a good EEPROM before we read from it */
1917 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
1918 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
1919 return (EIO);
1920 }
1921
1922 /* Get Hardware Flow Control setting */
1923 adapter->hw.fc.type = ixgbe_fc_full;
1924 adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
1925 adapter->hw.fc.low_water = IXGBE_FC_LO;
1926 adapter->hw.fc.high_water = IXGBE_FC_HI;
1927 adapter->hw.fc.send_xon = TRUE;
1928
1929 if (ixgbe_init_hw(&adapter->hw)) {
1930 device_printf(dev,"Hardware Initialization Failed");
1931 return (EIO);

--- 40 unchanged lines hidden ---

1972 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1973
1974 ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
1975 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1976 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1977
1978 ifp->if_capenable = ifp->if_capabilities;
1979
1980 if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
1981 (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT))
1982 ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
1983 IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
1984 else
1985 ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
1986 TRUE, FALSE);
1987
1988 /*
1989 * Specify the media types supported by this adapter and register
1990 * callbacks to update media and link information
1991 */
1992 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1993 ixgbe_media_status);
1994 ifmedia_add(&adapter->media, IFM_ETHER | ixgbe_optics |
1995 IFM_FDX, 0, NULL);
1996 if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
1997 (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT)) {
1998 ifmedia_add(&adapter->media,
1999 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2000 ifmedia_add(&adapter->media,
2001 IFM_ETHER | IFM_1000_T, 0, NULL);
2002 }
2003 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2004 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2005

--- 84 unchanged lines hidden ---

2090 **********************************************************************/
2091static int
2092ixgbe_allocate_queues(struct adapter *adapter)
2093{
2094 device_t dev = adapter->dev;
2095 struct tx_ring *txr;
2096 struct rx_ring *rxr;
2097 int rsize, tsize, error = IXGBE_SUCCESS;
2098 char name_string[16];
2099 int txconf = 0, rxconf = 0;
2100
2101 /* First allocate the TX ring struct memory */
2102 if (!(adapter->tx_rings =
2103 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2104 adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2105 device_printf(dev, "Unable to allocate TX ring memory\n");
2106 error = ENOMEM;

--- 22 unchanged lines hidden ---

2129 */
2130 for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
2131 /* Set up some basics */
2132 txr = &adapter->tx_rings[i];
2133 txr->adapter = adapter;
2134 txr->me = i;
2135
2136 /* Initialize the TX side lock */
2137 snprintf(name_string, sizeof(name_string), "%s:tx(%d)",
2138 device_get_nameunit(dev), txr->me);
2139 mtx_init(&txr->tx_mtx, name_string, NULL, MTX_DEF);
2140
2141 if (ixgbe_dma_malloc(adapter, tsize,
2142 &txr->txdma, BUS_DMA_NOWAIT)) {
2143 device_printf(dev,
2144 "Unable to allocate TX Descriptor memory\n");
2145 error = ENOMEM;
2146 goto err_tx_desc;
2147 }

--- 16 unchanged lines hidden ---

2164 rsize = roundup2(adapter->num_rx_desc *
2165 sizeof(union ixgbe_adv_rx_desc), 4096);
2166 for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
2167 rxr = &adapter->rx_rings[i];
2168 /* Set up some basics */
2169 rxr->adapter = adapter;
2170 rxr->me = i;
2171
2172 /* Initialize the RX side lock */
2173 snprintf(name_string, sizeof(name_string), "%s:rx(%d)",
2174 device_get_nameunit(dev), rxr->me);
2175 mtx_init(&rxr->rx_mtx, name_string, NULL, MTX_DEF);
2176
2177 if (ixgbe_dma_malloc(adapter, rsize,
2178 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2179 device_printf(dev,
2180 "Unable to allocate RxDescriptor memory\n");
2181 error = ENOMEM;
2182 goto err_rx_desc;
2183 }

--- 365 unchanged lines hidden (view full) ---

2549 }
2550
2551 /* Now copy bits into descriptor */
2552 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2553 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2554 TXD->seqnum_seed = htole32(0);
2555 TXD->mss_l4len_idx = htole32(0);
2556
2557#ifndef NO_82598_A0_SUPPORT
2558 if (adapter->hw.revision_id == 0)
2559 desc_flip(TXD);
2560#endif
2561
2562 tx_buffer->m_head = NULL;
2563
2564 /* We've consumed the first desc, adjust counters */
2565 if (++ctxd == adapter->num_tx_desc)
2566 ctxd = 0;
2567 txr->next_avail_tx_desc = ctxd;
2568 --txr->tx_avail;
2569

--- 77 unchanged lines hidden (view full) ---

2647 /* MSS L4LEN IDX */
2648 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2649 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2650 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2651
2652 TXD->seqnum_seed = htole32(0);
2653 tx_buffer->m_head = NULL;
2654
2655#ifndef NO_82598_A0_SUPPORT
2656 if (adapter->hw.revision_id == 0)
2657 desc_flip(TXD);
2658#endif
2659
2660 if (++ctxd == adapter->num_tx_desc)
2661 ctxd = 0;
2662
2663 txr->tx_avail--;
2664 txr->next_avail_tx_desc = ctxd;
2665 return TRUE;
2666}
2667

--- 105 unchanged lines hidden ---

2773}
2774
2775/*********************************************************************
2776 *
2777 * Get a buffer from system mbuf buffer pool.
2778 *
2779 **********************************************************************/
2780static int
2781ixgbe_get_buf(struct rx_ring *rxr, int i)
2782{
2783 struct adapter *adapter = rxr->adapter;
2784 struct mbuf *mp;
2785 bus_dmamap_t map;
2786 int nsegs, error, old, s = 0;
2787 int size = MCLBYTES;
2788
2789
2790 bus_dma_segment_t segs[1];
2791 struct ixgbe_rx_buf *rxbuf;
2792
2793 /* Are we going to Jumbo clusters? */
2794 if (adapter->bigbufs) {
2795 size = MJUMPAGESIZE;
2796 s = 1;
2797 }
2798
2799 mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
2800 if (mp == NULL) {
2801 adapter->mbuf_alloc_failed++;
2802 return (ENOBUFS);
2803 }
2804
2805 mp->m_len = mp->m_pkthdr.len = size;
2806
2807 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2808 m_adj(mp, ETHER_ALIGN);
2809
2810 /*
2811 * Using memory from the mbuf cluster pool, invoke the bus_dma
2812 * machinery to arrange the memory mapping.
2813 */
2814 error = bus_dmamap_load_mbuf_sg(rxr->rxtag[s], rxr->spare_map[s],
2815 mp, segs, &nsegs, BUS_DMA_NOWAIT);
2816 if (error) {
2817 m_free(mp);
2818 return (error);
2819 }
2820
2821 /* Now check our target buffer for existing mapping */
2822 rxbuf = &rxr->rx_buffers[i];
2823 old = rxbuf->bigbuf;
2824 if (rxbuf->m_head != NULL)
2825 bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
2826
2827 map = rxbuf->map[old];
2828 rxbuf->map[s] = rxr->spare_map[s];
2829 rxr->spare_map[old] = map;
2830 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s], BUS_DMASYNC_PREREAD);
2831 rxbuf->m_head = mp;
2832 rxbuf->bigbuf = s;
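 /*
  * The freshly loaded spare map is installed on this buffer and
  * the buffer's previous map becomes the new spare for that size
  * class, so a preloaded 2K map and a preloaded jumbo map are
  * always on hand for the next replenish.
  */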
2833
2834 rxr->rx_base[i].read.pkt_addr = htole64(segs[0].ds_addr);
2835
2836#ifndef NO_82598_A0_SUPPORT
2837 /* A0 silicon needs the descriptors One's Complemented */
2838 if (adapter->hw.revision_id == 0) {
2839 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
2840 struct dhack *d;
2841
2842 d = (struct dhack *)&rxr->rx_base[i];
2843 d->a1 = ~(d->a1);
2844 d->a2 = ~(d->a2);
2845 }
2846#endif
2847
2848 return (0);
2849}
2850
2851/*********************************************************************
2852 *
2853 * Allocate memory for rx_buffer structures. Since we use one
2854 * rx_buffer per received packet, the maximum number of rx_buffers
2855 * that we'll need is equal to the number of receive descriptors
2856 * that we've allocated.
2857 *
2858 **********************************************************************/

--- 9 unchanged lines hidden ---

2868 if (!(rxr->rx_buffers =
2869 (struct ixgbe_rx_buf *) malloc(bsize,
2870 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2871 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2872 error = ENOMEM;
2873 goto fail;
2874 }
2875
2876 /* First make the small (2K) tag/map */
2877 if ((error = bus_dma_tag_create(NULL, /* parent */
2878 PAGE_SIZE, 0, /* alignment, bounds */
2879 BUS_SPACE_MAXADDR, /* lowaddr */
2880 BUS_SPACE_MAXADDR, /* highaddr */
2881 NULL, NULL, /* filter, filterarg */
2882 MCLBYTES, /* maxsize */
2883 1, /* nsegments */
2884 MCLBYTES, /* maxsegsize */
2885 0, /* flags */
2886 NULL, /* lockfunc */
2887 NULL, /* lockfuncarg */
2888 &rxr->rxtag[0]))) {
2889 device_printf(dev, "Unable to create RX Small DMA tag\n");
2890 goto fail;
2891 }
2892
2893 /* Next make the large (4K) tag/map */
2894 if ((error = bus_dma_tag_create(NULL, /* parent */
2895 PAGE_SIZE, 0, /* alignment, bounds */
2896 BUS_SPACE_MAXADDR, /* lowaddr */
2897 BUS_SPACE_MAXADDR, /* highaddr */
2898 NULL, NULL, /* filter, filterarg */
2899 MJUMPAGESIZE, /* maxsize */
2900 1, /* nsegments */
2901 MJUMPAGESIZE, /* maxsegsize */
2902 0, /* flags */
2903 NULL, /* lockfunc */
2904 NULL, /* lockfuncarg */
2905 &rxr->rxtag[1]))) {
2906 device_printf(dev, "Unable to create RX Large DMA tag\n");
2907 goto fail;
2908 }
2909
2910 /* Create the spare maps (used by getbuf) */
2911 error = bus_dmamap_create(rxr->rxtag[0], BUS_DMA_NOWAIT,
2912 &rxr->spare_map[0]);
2913 if (error == 0) /* don't mask an earlier failure */
2914 error = bus_dmamap_create(rxr->rxtag[1], BUS_DMA_NOWAIT, &rxr->spare_map[1]);
2915 if (error) {
2916 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
2917 __func__, error);
2918 goto fail;
2919 }
2920
2921 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2922 rxbuf = &rxr->rx_buffers[i];
2923 error = bus_dmamap_create(rxr->rxtag[0],
2924 BUS_DMA_NOWAIT, &rxbuf->map[0]);
2925 if (error) {
2926 device_printf(dev, "Unable to create Small RX DMA map\n");
2927 goto fail;
2928 }
2929 error = bus_dmamap_create(rxr->rxtag[1],
2930 BUS_DMA_NOWAIT, &rxbuf->map[1]);
2931 if (error) {
2932 device_printf(dev, "Unable to create Large RX DMA map\n");
2933 goto fail;
2934 }
2935 }
2936
2937 return (0);
2938
2939fail:
2940 /* Frees all, but can handle partial completion */
2941 ixgbe_free_receive_structures(adapter);
2942 return (error);

--- 6 unchanged lines hidden ---

2949 **********************************************************************/
2950static int
2951ixgbe_setup_receive_ring(struct rx_ring *rxr)
2952{
2953 struct adapter *adapter;
2954 device_t dev;
2955 struct ixgbe_rx_buf *rxbuf;
2956 struct lro_ctrl *lro = &rxr->lro;
2957 int j, rsize, s = 0;
2958
2959 adapter = rxr->adapter;
2960 dev = adapter->dev;
2961 rsize = roundup2(adapter->num_rx_desc *
2962 sizeof(union ixgbe_adv_rx_desc), 4096);
2963 /* Clear the ring contents */
2964 bzero((void *)rxr->rx_base, rsize);
2965
2966 /*
2967 ** Free current RX buffers: the size of the
2968 ** buffer that is loaded is indicated by its
2969 ** bigbuf value.
2970 */
2971 for (int i = 0; i < adapter->num_rx_desc; i++) {
2972 rxbuf = &rxr->rx_buffers[i];
2973 s = rxbuf->bigbuf;
2974 if (rxbuf->m_head != NULL) {
2975 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2976 BUS_DMASYNC_POSTREAD);
2977 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2978 m_freem(rxbuf->m_head);
2979 rxbuf->m_head = NULL;
2980 }
2981 }
2982
2983 for (j = 0; j < adapter->num_rx_desc; j++) {
2984 if (ixgbe_get_buf(rxr, j) == ENOBUFS) {
2985 rxr->rx_buffers[j].m_head = NULL;
2986 rxr->rx_base[j].read.pkt_addr = 0;
2987 /* If we fail, some buffers may have changed size */
2988 s = adapter->bigbufs;
2989 goto fail;
2990 }
2991 }
2992
2993 /* Setup our descriptor indices */
2994 rxr->next_to_check = 0;
2995 rxr->last_cleaned = 0;
2996
2997 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2998 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2999
3000 /* Now set up the LRO interface */
3001 if (ixgbe_enable_lro) {
3002 int err = tcp_lro_init(lro);
3003 if (err) {
3004 device_printf(dev,"LRO Initialization failed!\n");
3005 goto fail;
3006 }
3007 device_printf(dev,"RX LRO Initialized\n");
3008 lro->ifp = adapter->ifp;
3009 }
3010
3011
3012 return (0);
3013fail:
3014 /*
3015 * We need to clean up any buffers allocated so far;
3016 * 'j' is the failing index, so decrement it to get
3017 * the last success.
3018 */
3019 for (--j; j >= 0; j--) {
3020 rxbuf = &rxr->rx_buffers[j];
3021 if (rxbuf->m_head != NULL) {
3022 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
3023 BUS_DMASYNC_POSTREAD);
3024 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
3025 m_freem(rxbuf->m_head);
3026 rxbuf->m_head = NULL;
3027 }
3028 }
3029 return (ENOBUFS);
3030}
3031
3032/*********************************************************************
3033 *
3034 * Initialize all receive rings.
3035 *
3036 **********************************************************************/
3037static int
3038ixgbe_setup_receive_structures(struct adapter *adapter)
3039{
3040 struct rx_ring *rxr = adapter->rx_rings;
3041 int i, j, s;
3042
3043 for (i = 0; i < adapter->num_rx_queues; i++, rxr++)
3044 if (ixgbe_setup_receive_ring(rxr))
3045 goto fail;
3046
3047 return (0);
3048fail:
3049 /*
3050 * Free RX buffers allocated so far; we will only handle
3051 * the rings that completed, since the failing case will
3052 * have cleaned up after itself. The value of 'i' will be
3053 * the failed ring, so we must pre-decrement it.
3054 */
3055 rxr = adapter->rx_rings;
3056 for (--i; i >= 0; i--, rxr++) {
3057 for (j = 0; j < adapter->num_rx_desc; j++) {
3058 struct ixgbe_rx_buf *rxbuf;
3059 rxbuf = &rxr->rx_buffers[j];
3060 s = rxbuf->bigbuf;
3061 if (rxbuf->m_head != NULL) {
3062 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
3063 BUS_DMASYNC_POSTREAD);
3064 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
3065 m_freem(rxbuf->m_head);
3066 rxbuf->m_head = NULL;
3067 }
3068 }
3069 }
3070
3071 return (ENOBUFS);
3072}
3073
3074/*********************************************************************
3075 *
3076 * Enable receive unit.
3077 *
3078 **********************************************************************/
3079static void
3080ixgbe_initialize_receive_units(struct adapter *adapter)
3081{
3082 struct rx_ring *rxr = adapter->rx_rings;
3083 struct ifnet *ifp = adapter->ifp;
3084 u32 rxctrl, fctrl, srrctl, rxcsum;
3085 u32 mrqc, hlreg, linkvec;
3086 u32 random[10];
3087 int i,j;
3088 union {
3089 u8 c[128];
3090 u32 i[32];
3091 } reta;
3092
3093
3094 /*
3095 * Make sure receives are disabled while
3096 * setting up the descriptor ring
3097 */
3098 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
3099 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
3100 rxctrl & ~IXGBE_RXCTRL_RXEN);
3101
3102 /* Enable broadcasts */
3103 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3104 fctrl |= IXGBE_FCTRL_BAM;
3105 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3106
3107 hlreg = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
3108 if (ifp->if_mtu > ETHERMTU)
3109 hlreg |= IXGBE_HLREG0_JUMBOEN;
3110 else
3111 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3112 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, hlreg);
3113
3114 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
3115 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3116 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3117 if (adapter->bigbufs)
3118 srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3119 else
3120 srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3121 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3122 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
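 /*
  * SRRCTL packet buffer sizes are encoded in 1KB units (assuming
  * the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10): 4096 >> 10 = 4
  * selects 4K jumbo clusters, 2048 >> 10 = 2 standard clusters.
  */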
3123
3124 /* Set Queue moderation rate */
3125 for (i = 0; i < IXGBE_MSGS; i++)
3126 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(i), DEFAULT_ITR);
3127
3128 /* Set Link moderation lower */
3129 linkvec = adapter->num_tx_queues + adapter->num_rx_queues;
3130 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(linkvec), LINK_ITR);
3131
3132 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3133 u64 rdba = rxr->rxdma.dma_paddr;
3134 /* Setup the Base and Length of the Rx Descriptor Ring */
3135 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(i),
3136 (rdba & 0x00000000ffffffffULL));
3137 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(i), (rdba >> 32));
3138 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(i),
3139 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3140
3141 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3142 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(i), 0);
3143 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(i),
3144 adapter->num_rx_desc - 1);
3145 }
3146
3147 rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
3148
3149 if (adapter->num_rx_queues > 1) {
3150 /* set up random bits */
3151 arc4rand(&random, sizeof(random), 0);
3152
3153 /* Create reta data */
3154 for (i = 0; i < 128; )
3155 for (j = 0; j < adapter->num_rx_queues &&
3156 i < 128; j++, i++)
3157 reta.c[i] = j;
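 /*
  * For example, with 4 RX queues the 128-entry table reads
  * 0,1,2,3,0,1,2,3,... so RSS hash results spread incoming
  * flows round-robin across the queues.
  */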
3158
3159 /* Set up the redirection table */
3160 for (i = 0; i < 32; i++)
3161 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta.i[i]);
3162
3163 /* Now fill our hash function seeds */
3164 for (int i = 0; i < 10; i++)
3165 IXGBE_WRITE_REG_ARRAY(&adapter->hw,
3166 IXGBE_RSSRK(0), i, random[i]);
3167
3168 mrqc = IXGBE_MRQC_RSSEN
3169 /* Perform hash on these packet types */
3170 | IXGBE_MRQC_RSS_FIELD_IPV4
3171 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3172 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3173 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3174 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3175 | IXGBE_MRQC_RSS_FIELD_IPV6
3176 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3177 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3178 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3179 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc);
3180
3181 /* RSS and RX IPP Checksum are mutually exclusive */
3182 rxcsum |= IXGBE_RXCSUM_PCSD;
3183 }
3184
3185 if (ifp->if_capenable & IFCAP_RXCSUM)
3186 rxcsum |= IXGBE_RXCSUM_PCSD;
3187
3188 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3189 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3190
3191 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
3192
3193 /* Enable Receive engine */
3194 rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
3195 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl);
3196
3197 return;
3198}
3199
3200/*********************************************************************
3201 *
3202 * Free all receive rings.
3203 *
3204 **********************************************************************/

--- 25 unchanged lines hidden ---

3230 struct adapter *adapter = NULL;
3231 struct ixgbe_rx_buf *rxbuf = NULL;
3232
3233 INIT_DEBUGOUT("free_receive_buffers: begin");
3234 adapter = rxr->adapter;
3235 if (rxr->rx_buffers != NULL) {
3236 rxbuf = &rxr->rx_buffers[0];
3237 for (int i = 0; i < adapter->num_rx_desc; i++) {
3238 int s = rxbuf->bigbuf;
3239 if (rxbuf->map[s] != NULL) {
3240 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
3241 bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
3242 }
3243 if (rxbuf->m_head != NULL) {
3244 m_freem(rxbuf->m_head);
3245 }
3246 rxbuf->m_head = NULL;
3247 ++rxbuf;
3248 }
3249 }
3250 if (rxr->rx_buffers != NULL) {
3251 free(rxr->rx_buffers, M_DEVBUF);
3252 rxr->rx_buffers = NULL;
3253 }
3254 for (int s = 0; s < 2; s++) {
3255 if (rxr->rxtag[s] != NULL) {
3256 bus_dma_tag_destroy(rxr->rxtag[s]);
3257 rxr->rxtag[s] = NULL;
3258 }
3259 }
3260 return;
3261}
3262
3263/*********************************************************************
3264 *
3265 * This routine executes in interrupt context. It replenishes
3266 * the mbufs in the descriptor ring and sends data which has
3267 * been DMA'ed into host memory to the upper layer.
3268 *
3269 * We loop at most count times if count is > 0, or until done if
3270 * count < 0.
3271 *
3272 *********************************************************************/
3273static bool
3274ixgbe_rxeof(struct rx_ring *rxr, int count)
3275{
3276 struct adapter *adapter = rxr->adapter;
3277 struct ifnet *ifp = adapter->ifp;
3278 struct lro_ctrl *lro = &rxr->lro;
3279 struct lro_entry *queued;
3280 struct mbuf *mp;
3281 int len, i, eop = 0;
3282 u8 accept_frame = 0;
3283 u32 staterr;
3284 union ixgbe_adv_rx_desc *cur;
3285
3286
3287 IXGBE_RX_LOCK(rxr);
3288 i = rxr->next_to_check;
3289 cur = &rxr->rx_base[i];
3290 staterr = cur->wb.upper.status_error;
3291
3292 if (!(staterr & IXGBE_RXD_STAT_DD)) {
3293 IXGBE_RX_UNLOCK(rxr);
3294 return FALSE;
3295 }
3296
3297 while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3298 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3299 struct mbuf *m = NULL;
3300 int s;
3301
3302 mp = rxr->rx_buffers[i].m_head;
3303 s = rxr->rx_buffers[i].bigbuf;
3304 bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
3305 BUS_DMASYNC_POSTREAD);
3306 accept_frame = 1;
3307 if (staterr & IXGBE_RXD_STAT_EOP) {
3308 count--;
3309 eop = 1;
3310 } else {
3311 eop = 0;
3312 }
3313 len = cur->wb.upper.length;
3314
3315 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3316 accept_frame = 0;
3317
3318 if (accept_frame) {
3319 /* Get a fresh buffer first */
3320 if (ixgbe_get_buf(rxr, i) != 0) {
3321 ifp->if_iqdrops++;
3322 goto discard;
3323 }
3324
3325 /* Assign correct length to the current fragment */
3326 mp->m_len = len;
3327
3328 if (rxr->fmp == NULL) {
3329 mp->m_pkthdr.len = len;
3330 rxr->fmp = mp; /* Store the first mbuf */
3331 rxr->lmp = mp;
3332 } else {
3333 /* Chain mbuf's together */
3334 mp->m_flags &= ~M_PKTHDR;
3335 rxr->lmp->m_next = mp;
3336 rxr->lmp = rxr->lmp->m_next;
3337 rxr->fmp->m_pkthdr.len += len;
3338 }
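 /*
  * fmp/lmp track the first and last mbuf of a frame that spans
  * several descriptors; fragments are appended here and the
  * completed chain is only handed up once the EOP descriptor
  * is seen below.
  */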
3339
3340 if (eop) {
3341 rxr->fmp->m_pkthdr.rcvif = ifp;
3342 ifp->if_ipackets++;
3343 rxr->packet_count++;
3344 rxr->byte_count += rxr->fmp->m_pkthdr.len;
3345
3346 ixgbe_rx_checksum(adapter,
3347 staterr, rxr->fmp);
3348
3349 if (staterr & IXGBE_RXD_STAT_VP) {
3350#if __FreeBSD_version < 700000
3351 VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
3352 (le16toh(cur->wb.upper.vlan) &
3353 IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
3354#else
3355 rxr->fmp->m_pkthdr.ether_vtag =
3356 le16toh(cur->wb.upper.vlan);
3357 rxr->fmp->m_flags |= M_VLANTAG;
3358#endif
3359 }
3360 m = rxr->fmp;
3361 rxr->fmp = NULL;
3362 rxr->lmp = NULL;
3363 }
3364 } else {
3365 ifp->if_ierrors++;
3366discard:
3367 /* Reuse loaded DMA map and just update mbuf chain */
3368 mp = rxr->rx_buffers[i].m_head;
3369 mp->m_len = mp->m_pkthdr.len =
3370 (rxr->rx_buffers[i].bigbuf ? MJUMPAGESIZE : MCLBYTES);
3371 mp->m_data = mp->m_ext.ext_buf;
3372 mp->m_next = NULL;
3373 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3374 m_adj(mp, ETHER_ALIGN);
3375 if (rxr->fmp != NULL) {
3376 m_freem(rxr->fmp);
3377 rxr->fmp = NULL;
3378 rxr->lmp = NULL;
3379 }
3380 m = NULL;
3381 }
3382
3383 /* Zero out the receive descriptors status */
3384 cur->wb.upper.status_error = 0;
3385 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3387
3388 rxr->last_cleaned = i; /* for updating tail */
3389
3390 if (++i == adapter->num_rx_desc)
3391 i = 0;
3392
3393 /* Now send up to the stack */
3394 if (m != NULL) {
3395 rxr->next_to_check = i;
3396 /* Use LRO if possible */
3397 if ((!lro->lro_cnt) || (tcp_lro_rx(lro, m, 0))) {
3398 IXGBE_RX_UNLOCK(rxr);
3399 (*ifp->if_input)(ifp, m);
3400 IXGBE_RX_LOCK(rxr);
3401 i = rxr->next_to_check;
3402 }
3403 }
3404 /* Get next descriptor */
3405 cur = &rxr->rx_base[i];
3406 staterr = cur->wb.upper.status_error;
3407 }
3408 rxr->next_to_check = i;
3409
3410 /* Advance the IXGBE's Receive Queue "Tail Pointer" */
3411 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
3412 IXGBE_RX_UNLOCK(rxr);
3413
3414 /*
3415 ** Flush any outstanding LRO work;
3416 ** this may call into the stack and
3417 ** must not hold a driver lock.
3418 */
3419 while (!SLIST_EMPTY(&lro->lro_active)) {
3420 queued = SLIST_FIRST(&lro->lro_active);
3421 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3422 tcp_lro_flush(lro, queued);
3423 }
3424
3425 if (!(staterr & IXGBE_RXD_STAT_DD))
3426 return FALSE;
3427
3428 return TRUE;
3429}
3430
3431/*********************************************************************
3432 *
3433 * Verify that the hardware indicated that the checksum is valid.
3434 * Inform the stack about the status of the checksum so that
3435 * the stack doesn't spend time verifying it again.
3436 *
3437 *********************************************************************/
3438static void
3439ixgbe_rx_checksum(struct adapter *adapter,
3440 u32 staterr, struct mbuf * mp)
3441{
3442 struct ifnet *ifp = adapter->ifp;
3443 u16 status = (u16) staterr;
3444 u8 errors = (u8) (staterr >> 24);
3445
3446 /* Not offloading */
3447 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
3448 mp->m_pkthdr.csum_flags = 0;
3449 return;
3450 }
3451
3452 if (status & IXGBE_RXD_STAT_IPCS) {
3453 /* Did it pass? */
3454 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3455 /* IP Checksum Good */
3456 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3457 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3458
3459 } else

--- 5 unchanged lines hidden ---

3465 mp->m_pkthdr.csum_flags |=
3466 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3467 mp->m_pkthdr.csum_data = htons(0xffff);
3468 }
3469 }
3470 return;
3471}
3472
3473#ifdef IXGBE_VLAN_EVENTS
3474/*
3475 * This routine is run via an vlan
3476 * config EVENT
3477 */
3478static void
3479ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
3480{
3481 struct adapter *adapter = ifp->if_softc;
3482 u32 ctrl;
3483
3484 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3485 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
3486 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3487 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3488
3489 /* Make entry in the hardware filter table */
3490 ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
3491}
3492
3493/*
3494 * This routine is run via a vlan
3495 * unconfig EVENT
3496 */
3497static void
3498ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
3499{
3500 struct adapter *adapter = ifp->if_softc;
3501
3502 /* Remove entry in the hardware filter table */
3503 ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
3504
3505 /* Have all vlans been unregistered? */
3506 if (adapter->ifp->if_vlantrunk == NULL) {
3507 u32 ctrl;
3508 /* Turn off the filter table */
3509 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3510 ctrl &= ~IXGBE_VLNCTRL_VME;
3511 ctrl &= ~IXGBE_VLNCTRL_VFE;
3512 ctrl |= IXGBE_VLNCTRL_CFIEN;
3513 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3514 }
3515}
3516#endif /* IXGBE_VLAN_EVENTS */
3517
3518static void
3519ixgbe_enable_intr(struct adapter *adapter)
3520{
3521 struct ixgbe_hw *hw = &adapter->hw;
3522 u32 mask = IXGBE_EIMS_ENABLE_MASK;
3523
3524 /* Enable Fan Failure detection */
3525 if (hw->phy.media_type == ixgbe_media_type_copper)
3526 mask |= IXGBE_EIMS_GPI_SDP1;
3527 /* With RSS we use auto clear */
3528 if (adapter->msix_mem) {
3529 /* Don't autoclear Link */
3530 mask &= ~IXGBE_EIMS_OTHER;
3531 mask &= ~IXGBE_EIMS_LSC;
3532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
3533 adapter->eims_mask | mask);
3534 }
3535
3536 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3537 IXGBE_WRITE_FLUSH(hw);
3538
3539 return;
3540}
3541
3542static void
3543ixgbe_disable_intr(struct adapter *adapter)
3544{

--- 10 unchanged lines hidden (view full) ---

3555 u16 value;
3556
3557 value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
3558 reg, 2);
3559
3560 return (value);
3561}
3562
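/*
 * Each 32-bit IVAR register holds four 8-bit vector entries, so
 * 'entry >> 2' selects the register and 'entry & 0x3' the byte
 * lane within it; IXGBE_IVAR_ALLOC_VAL marks the entry valid.
 */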
3563static void
3564ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector)
3565{
3566 u32 ivar, index;
3567
3568 vector |= IXGBE_IVAR_ALLOC_VAL;
3569 index = (entry >> 2) & 0x1F;
3570 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
3571 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3572 ivar |= (vector << (8 * (entry & 0x3)));
3573 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3574}
3575
3576static void
3577ixgbe_configure_ivars(struct adapter *adapter)
3578{
3579 struct tx_ring *txr = adapter->tx_rings;
3580 struct rx_ring *rxr = adapter->rx_rings;
3581
3582 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3583 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
3584 adapter->eims_mask |= rxr->eims;
3585 }
3586
3587 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
3588 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
3589 adapter->eims_mask |= txr->eims;
3590 }
3591
3592 /* For the Link interrupt */
3593 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
3594 adapter->linkvec);
3595 adapter->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
3596}
3597
3598/**********************************************************************
3599 *
3600 * Update the board statistics counters.
3601 *
3602 **********************************************************************/
3603static void
3604ixgbe_update_stats_counters(struct adapter *adapter)
3605{

--- 32 unchanged lines hidden ---

3638 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3639 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3640 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3641 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3642 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3643 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3644 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3645
3646 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3647 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3648
3649 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3650 adapter->stats.lxontxc += lxon;
3651 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3652 adapter->stats.lxofftxc += lxoff;
3653 total = lxon + lxoff;
3654
3655 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3656 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);

--- 38 unchanged lines hidden ---

3695 **********************************************************************/
3696static void
3697ixgbe_print_hw_stats(struct adapter * adapter)
3698{
3699 device_t dev = adapter->dev;
3700
3701
3702 device_printf(dev,"Std Mbuf Failed = %lu\n",
3703 adapter->mbuf_alloc_failed);
3704 device_printf(dev,"Std Cluster Failed = %lu\n",
3705 adapter->mbuf_cluster_failed);
3706
3707 device_printf(dev,"Missed Packets = %llu\n",
3708 (long long)adapter->stats.mpc[0]);
3709 device_printf(dev,"Receive length errors = %llu\n",
3710 ((long long)adapter->stats.roc +
3711 (long long)adapter->stats.ruc));
3712 device_printf(dev,"Crc errors = %llu\n",
3713 (long long)adapter->stats.crcerrs);
3714 device_printf(dev,"Driver dropped packets = %lu\n",

--- 40 unchanged lines hidden ---

3755 device_printf(dev,"Error Byte Count = %u\n",
3756 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3757
3758 for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3759 struct lro_ctrl *lro = &rxr->lro;
3760 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
3761 i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
3762 IXGBE_READ_REG(hw, IXGBE_RDT(i)));
3763 device_printf(dev,"RX(%d) Packets Received: %lu\n",
3764 rxr->me, (long)rxr->packet_count);
3765 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3766 rxr->me, (long)rxr->byte_count);
3767 device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
3768 rxr->me, (long)rxr->rx_irq);
3769 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3770 rxr->me, lro->lro_queued);
3771 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3772 rxr->me, lro->lro_flushed);
3773 }
3774
3775 for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
3776 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
3777 IXGBE_READ_REG(hw, IXGBE_TDH(i)),
3778 IXGBE_READ_REG(hw, IXGBE_TDT(i)));
3779 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3780 txr->me, (long)txr->tx_packets);
3781 device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
3782 txr->me, (long)txr->tx_irq);
3783 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3784 txr->me, (long)txr->no_tx_desc_avail);
3785 }
3786
3787 device_printf(dev,"Link IRQ Handled: %lu\n",
3788 (long)adapter->link_irq);

--- 58 unchanged lines hidden ---

3847 if (error)
3848 return (error);
3849
3850 adapter = (struct adapter *) arg1;
3851 switch (ixgbe_flow_control) {
3852 case ixgbe_fc_rx_pause:
3853 case ixgbe_fc_tx_pause:
3854 case ixgbe_fc_full:
3855 adapter->hw.fc.type = ixgbe_flow_control;
3856 break;
3857 case ixgbe_fc_none:
3858 default:
3859 adapter->hw.fc.type = ixgbe_fc_none;
3860 }
3861
3862 ixgbe_setup_fc(&adapter->hw, 0);
3863 return error;
3864}
3865
3866static void
3867ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
3868 const char *description, int *limit, int value)
3869{
3870 *limit = value;
3871 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3872 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3873 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3874}
3875
3876#ifndef NO_82598_A0_SUPPORT
3877/*
3878 * A0 Workaround: invert descriptor for hardware
3879 */
3880void
3881desc_flip(void *desc)
3882{
3883 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
3884 struct dhack *d;
3885
3886 d = (struct dhack *)desc;
3887 d->a1 = ~(d->a1);
3888 d->a2 = ~(d->a2);
3889 d->b1 = ~(d->b1);
3890 d->b2 = ~(d->b2);
3891 d->b2 &= 0xFFFFFFF0;
3892 d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
3893}
3894#endif
3895
3896
3897