if_igb.c: r199192 (deleted lines) vs. r200243 (added lines)
1/******************************************************************************
2
3 Copyright (c) 2001-2009, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8

--- 16 unchanged lines hidden ---

25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
1/******************************************************************************
2
3 Copyright (c) 2001-2009, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8

--- 16 unchanged lines hidden ---

25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/e1000/if_igb.c 199192 2009-11-11 19:13:40Z jfv $*/
33/*$FreeBSD: head/sys/dev/e1000/if_igb.c 200243 2009-12-08 01:07:44Z jfv $*/
34
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#include "opt_inet.h"
39#endif
40
41#include <sys/param.h>

--- 54 unchanged lines hidden ---

96/*********************************************************************
97 * Set this to one to display debug statistics
98 *********************************************************************/
99int igb_display_debug_stats = 0;
100
101/*********************************************************************
102 * Driver version:
103 *********************************************************************/
34
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#include "opt_inet.h"
39#endif
40
41#include <sys/param.h>

--- 54 unchanged lines hidden ---

96/*********************************************************************
97 * Set this to one to display debug statistics
98 *********************************************************************/
99int igb_display_debug_stats = 0;
100
101/*********************************************************************
102 * Driver version:
103 *********************************************************************/
104char igb_driver_version[] = "version - 1.7.3";
104char igb_driver_version[] = "version - 1.8.4";
105
106
107/*********************************************************************
108 * PCI Device ID Table
109 *
110 * Used by probe to select devices to load on
111 * Last field stores an index into e1000_strings
112 * Last entry must be all 0s

--- 5 unchanged lines hidden ---

118{
119 { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
121 PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
123 PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106
107/*********************************************************************
108 * PCI Device ID Table
109 *
110 * Used by probe to select devices to load on
111 * Last field stores an index into e1000_strings
112 * Last entry must be all 0s

--- 5 unchanged lines hidden ---

118{
119 { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
121 PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
123 PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
129 PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
131 PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
130 PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
132 PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
138 PCI_ANY_ID, PCI_ANY_ID, 0},
132 /* required last entry */
133 { 0, 0, 0, 0, 0}
134};
135
136/*********************************************************************
137 * Table of branding strings for all supported NICs.
138 *********************************************************************/
139

--- 14 unchanged lines hidden ---

154static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
155#if __FreeBSD_version >= 800000
156static int igb_mq_start(struct ifnet *, struct mbuf *);
157static int igb_mq_start_locked(struct ifnet *,
158 struct tx_ring *, struct mbuf *);
159static void igb_qflush(struct ifnet *);
160#endif
161static int igb_ioctl(struct ifnet *, u_long, caddr_t);
139 /* required last entry */
140 { 0, 0, 0, 0, 0}
141};
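For context, igb_probe() selects a device by walking this table and comparing the vendor/device IDs read from PCI config space; a minimal sketch of that match loop (assumed shape, in the style the e1000 drivers use):

	igb_vendor_info_t *ent = igb_vendor_info_array;
	u16 pci_vendor_id = pci_get_vendor(dev);
	u16 pci_device_id = pci_get_device(dev);

	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id)) {
			/* Match: adopt the branding string and claim the device */
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);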
142
143/*********************************************************************
144 * Table of branding strings for all supported NICs.
145 *********************************************************************/
146

--- 14 unchanged lines hidden ---

161static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
162#if __FreeBSD_version >= 800000
163static int igb_mq_start(struct ifnet *, struct mbuf *);
164static int igb_mq_start_locked(struct ifnet *,
165 struct tx_ring *, struct mbuf *);
166static void igb_qflush(struct ifnet *);
167#endif
168static int igb_ioctl(struct ifnet *, u_long, caddr_t);
162static void igb_watchdog(struct adapter *);
163static void igb_init(void *);
164static void igb_init_locked(struct adapter *);
165static void igb_stop(void *);
166static void igb_media_status(struct ifnet *, struct ifmediareq *);
167static int igb_media_change(struct ifnet *);
168static void igb_identify_hardware(struct adapter *);
169static int igb_allocate_pci_resources(struct adapter *);
170static int igb_allocate_msix(struct adapter *);
171static int igb_allocate_legacy(struct adapter *);
172static int igb_setup_msix(struct adapter *);
173static void igb_free_pci_resources(struct adapter *);
174static void igb_local_timer(void *);
169static void igb_init(void *);
170static void igb_init_locked(struct adapter *);
171static void igb_stop(void *);
172static void igb_media_status(struct ifnet *, struct ifmediareq *);
173static int igb_media_change(struct ifnet *);
174static void igb_identify_hardware(struct adapter *);
175static int igb_allocate_pci_resources(struct adapter *);
176static int igb_allocate_msix(struct adapter *);
177static int igb_allocate_legacy(struct adapter *);
178static int igb_setup_msix(struct adapter *);
179static void igb_free_pci_resources(struct adapter *);
180static void igb_local_timer(void *);
175static int igb_hardware_init(struct adapter *);
181static void igb_reset(struct adapter *);
176static void igb_setup_interface(device_t, struct adapter *);
177static int igb_allocate_queues(struct adapter *);
178static void igb_configure_queues(struct adapter *);
179
180static int igb_allocate_transmit_buffers(struct tx_ring *);
181static void igb_setup_transmit_structures(struct adapter *);
182static void igb_setup_transmit_ring(struct tx_ring *);
183static void igb_initialize_transmit_units(struct adapter *);

--- 15 unchanged lines hidden ---

199static void igb_rx_checksum(u32, struct mbuf *, bool);
200static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *);
201static bool igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
202static void igb_set_promisc(struct adapter *);
203static void igb_disable_promisc(struct adapter *);
204static void igb_set_multi(struct adapter *);
205static void igb_print_hw_stats(struct adapter *);
206static void igb_update_link_status(struct adapter *);
182static void igb_setup_interface(device_t, struct adapter *);
183static int igb_allocate_queues(struct adapter *);
184static void igb_configure_queues(struct adapter *);
185
186static int igb_allocate_transmit_buffers(struct tx_ring *);
187static void igb_setup_transmit_structures(struct adapter *);
188static void igb_setup_transmit_ring(struct tx_ring *);
189static void igb_initialize_transmit_units(struct adapter *);

--- 15 unchanged lines hidden ---

205static void igb_rx_checksum(u32, struct mbuf *, bool);
206static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *);
207static bool igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
208static void igb_set_promisc(struct adapter *);
209static void igb_disable_promisc(struct adapter *);
210static void igb_set_multi(struct adapter *);
211static void igb_print_hw_stats(struct adapter *);
212static void igb_update_link_status(struct adapter *);
207static int igb_get_buf(struct rx_ring *, int, u8);
213static int igb_get_buf(struct rx_ring *, int, int);
208
209static void igb_register_vlan(void *, struct ifnet *, u16);
210static void igb_unregister_vlan(void *, struct ifnet *, u16);
211static void igb_setup_vlan_hw_support(struct adapter *);
212
213static int igb_xmit(struct tx_ring *, struct mbuf **);
214static int igb_dma_malloc(struct adapter *, bus_size_t,
215 struct igb_dma_alloc *, int);

--- 75 unchanged lines hidden ---

291static int igb_low_latency = IGB_LOW_LATENCY;
292TUNABLE_INT("hw.igb.low_latency", &igb_low_latency);
293static int igb_ave_latency = IGB_AVE_LATENCY;
294TUNABLE_INT("hw.igb.ave_latency", &igb_ave_latency);
295static int igb_bulk_latency = IGB_BULK_LATENCY;
296TUNABLE_INT("hw.igb.bulk_latency", &igb_bulk_latency);
297
298/*
214
215static void igb_register_vlan(void *, struct ifnet *, u16);
216static void igb_unregister_vlan(void *, struct ifnet *, u16);
217static void igb_setup_vlan_hw_support(struct adapter *);
218
219static int igb_xmit(struct tx_ring *, struct mbuf **);
220static int igb_dma_malloc(struct adapter *, bus_size_t,
221 struct igb_dma_alloc *, int);

--- 75 unchanged lines hidden ---

297static int igb_low_latency = IGB_LOW_LATENCY;
298TUNABLE_INT("hw.igb.low_latency", &igb_low_latency);
299static int igb_ave_latency = IGB_AVE_LATENCY;
300TUNABLE_INT("hw.igb.ave_latency", &igb_ave_latency);
301static int igb_bulk_latency = IGB_BULK_LATENCY;
302TUNABLE_INT("hw.igb.bulk_latency", &igb_bulk_latency);
303
304/*
305 * MSIX should be the default for best performance,
306 * but this allows it to be forced off for testing.
307 */
308static int igb_enable_msix = 1;
309TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
310
311/*
312 * Header split has seemed to be beneficial in
313 * all circumstances tested, so it's on by default;
314 * however, this variable allows it to be disabled
315 * for debugging purposes.
316 */
317static bool igb_header_split = TRUE;
318TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
319
320/*
299** This will autoconfigure based on the number
321** This will autoconfigure based on the number
300** of CPUs if set to 0. Only a matched pair of
322** of CPUs if left at 0. Only a matched pair of
301** TX and RX rings are allowed.
302*/
323** TX and RX rings are allowed.
324*/
303static int igb_num_queues = 1;
325static int igb_num_queues = 0;
304TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
305
306/* How many packets rxeof tries to clean at a time */
307static int igb_rx_process_limit = 100;
308TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
309
310/* Flow control setting - default to FULL */
311static int igb_fc_setting = e1000_fc_full;

--- 223 unchanged lines hidden ---

535 }
536 /* Check its sanity */
537 if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) {
538 device_printf(dev, "Invalid MAC address\n");
539 error = EIO;
540 goto err_late;
541 }
542
326TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
327
328/* How many packets rxeof tries to clean at a time */
329static int igb_rx_process_limit = 100;
330TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
331
332/* Flow control setting - default to FULL */
333static int igb_fc_setting = e1000_fc_full;
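All of these hw.igb.* knobs are boot-time loader tunables: TUNABLE_INT() ties each variable to a kernel environment name, which the loader populates before the driver attaches. Illustrative /boot/loader.conf entries (example values, not recommendations):

	hw.igb.enable_msix="0"		# force MSI/legacy interrupts for testing
	hw.igb.hdr_split="0"		# turn header split off while debugging
	hw.igb.num_queues="2"		# fix the queue count instead of CPU-based auto
	hw.igb.rx_process_limit="200"	# packets cleaned per rxeof call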

--- 223 unchanged lines hidden ---

557 }
558 /* Check its sanity */
559 if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) {
560 device_printf(dev, "Invalid MAC address\n");
561 error = EIO;
562 goto err_late;
563 }
564
543 /* Now Initialize the hardware */
544 if (igb_hardware_init(adapter)) {
545 device_printf(dev, "Unable to initialize the hardware\n");
546 error = EIO;
547 goto err_late;
548 }
549
550 /*
551 ** Configure Interrupts
552 */
565 /*
566 ** Configure Interrupts
567 */
553 if (adapter->msix > 1) /* MSIX */
568 if ((adapter->msix > 1) && (igb_enable_msix))
554 error = igb_allocate_msix(adapter);
555 else /* MSI or Legacy */
556 error = igb_allocate_legacy(adapter);
557 if (error)
558 goto err_late;
559
560 /* Setup OS specific network interface */
561 igb_setup_interface(dev, adapter);
562
569 error = igb_allocate_msix(adapter);
570 else /* MSI or Legacy */
571 error = igb_allocate_legacy(adapter);
572 if (error)
573 goto err_late;
574
575 /* Setup OS specific network interface */
576 igb_setup_interface(dev, adapter);
577
578 /* Now get a good starting state */
579 igb_reset(adapter);
580
563#ifdef IGB_IEEE1588
564 /*
565 ** Setup the timer: IEEE 1588 support
566 */
567 adapter->cycles.read = igb_read_clock;
568 adapter->cycles.mask = (u64)-1;
569 adapter->cycles.mult = 1;
570 adapter->cycles.shift = IGB_TSYNC_SHIFT;

--- 211 unchanged lines hidden ---

782 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
783 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
784 break;
785 }
786
787 /* Send a copy of the frame to the BPF listener */
788 ETHER_BPF_MTAP(ifp, m_head);
789
581#ifdef IGB_IEEE1588
582 /*
583 ** Setup the timer: IEEE 1588 support
584 */
585 adapter->cycles.read = igb_read_clock;
586 adapter->cycles.mask = (u64)-1;
587 adapter->cycles.mult = 1;
588 adapter->cycles.shift = IGB_TSYNC_SHIFT;

--- 211 unchanged lines hidden ---

800 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
801 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
802 break;
803 }
804
805 /* Send a copy of the frame to the BPF listener */
806 ETHER_BPF_MTAP(ifp, m_head);
807
790 /* Set timeout in case hardware has problems transmitting. */
791 txr->watchdog_timer = IGB_TX_TIMEOUT;
808 /* Set watchdog on */
809 txr->watchdog_check = TRUE;
792 }
793}
794
795/*
796 * Legacy TX driver routine, called from the
797 * stack, always uses tx[0], and spins for it.
798 * Should not be used with multiqueue tx
799 */

--- 49 unchanged lines hidden ---

849 return (err);
850 }
851
852 if (m == NULL) /* Called by tasklet */
853 goto process;
854
855 /* If nothing queued go right to xmit */
856 if (drbr_empty(ifp, txr->br)) {
810 }
811}
812
813/*
814 * Legacy TX driver routine, called from the
815 * stack, always uses tx[0], and spins for it.
816 * Should not be used with multiqueue tx
817 */

--- 49 unchanged lines hidden ---

867 return (err);
868 }
869
870 if (m == NULL) /* Called by tasklet */
871 goto process;
872
873 /* If nothing queued go right to xmit */
874 if (drbr_empty(ifp, txr->br)) {
857 if ((err = igb_xmit(txr, &m)) != 0) {
858 if (m != NULL)
859 err = drbr_enqueue(ifp, txr->br, m);
860 return (err);
875 if (igb_xmit(txr, &m)) {
876 if (m && (err = drbr_enqueue(ifp, txr->br, m)) != 0)
877 return (err);
861 } else {
862 /* Success, update stats */
863 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
864 /* Send a copy of the frame to the BPF listener */
865 ETHER_BPF_MTAP(ifp, m);
866 /* Set the watchdog */
878 } else {
879 /* Success, update stats */
880 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
881 /* Send a copy of the frame to the BPF listener */
882 ETHER_BPF_MTAP(ifp, m);
883 /* Set the watchdog */
867 txr->watchdog_timer = IGB_TX_TIMEOUT;
884 txr->watchdog_check = TRUE;
868 }
869
870 } else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
871 return (err);
872
873process:
874 if (drbr_empty(ifp, txr->br))
875 return (err);
876
877 /* Process the queue */
878 while (TRUE) {
879 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
880 break;
881 next = drbr_dequeue(ifp, txr->br);
882 if (next == NULL)
883 break;
885 }
886
887 } else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
888 return (err);
889
890process:
891 if (drbr_empty(ifp, txr->br))
892 return (err);
893
894 /* Process the queue */
895 while (TRUE) {
896 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
897 break;
898 next = drbr_dequeue(ifp, txr->br);
899 if (next == NULL)
900 break;
884 if ((err = igb_xmit(txr, &next)) != 0) {
885 if (next != NULL)
886 err = drbr_enqueue(ifp, txr->br, next);
901 if (igb_xmit(txr, &next))
887 break;
902 break;
888 }
889 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
890 ETHER_BPF_MTAP(ifp, next);
891 /* Set the watchdog */
903 ETHER_BPF_MTAP(ifp, next);
904 /* Set the watchdog */
892 txr->watchdog_timer = IGB_TX_TIMEOUT;
905 txr->watchdog_check = TRUE;
893 }
894
895 if (txr->tx_avail <= IGB_TX_OP_THRESHOLD)
896 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
897
898 return (err);
899}
900

--- 51 unchanged lines hidden ---

952 * required.
953 */
954 ifp->if_flags |= IFF_UP;
955 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
956 IGB_CORE_LOCK(adapter);
957 igb_init_locked(adapter);
958 IGB_CORE_UNLOCK(adapter);
959 }
906 }
907
908 if (txr->tx_avail <= IGB_TX_OP_THRESHOLD)
909 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
910
911 return (err);
912}
913

--- 51 unchanged lines hidden ---

965 * required.
966 */
967 ifp->if_flags |= IFF_UP;
968 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
969 IGB_CORE_LOCK(adapter);
970 igb_init_locked(adapter);
971 IGB_CORE_UNLOCK(adapter);
972 }
960 if (!(ifp->if_flags & IFF_NOARP))
961 arp_ifinit(ifp, ifa);
973 arp_ifinit(ifp, ifa);
962 } else
963#endif
964 error = ether_ioctl(ifp, command, data);
965 break;
966 case SIOCSIFMTU:
967 {
968 int max_frame_size;
969

--- 74 unchanged lines hidden ---

1044 if (mask & IFCAP_TSO4) {
1045 ifp->if_capenable ^= IFCAP_TSO4;
1046 reinit = 1;
1047 }
1048 if (mask & IFCAP_VLAN_HWTAGGING) {
1049 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1050 reinit = 1;
1051 }
974 } else
975#endif
976 error = ether_ioctl(ifp, command, data);
977 break;
978 case SIOCSIFMTU:
979 {
980 int max_frame_size;
981

--- 74 unchanged lines hidden ---

1056 if (mask & IFCAP_TSO4) {
1057 ifp->if_capenable ^= IFCAP_TSO4;
1058 reinit = 1;
1059 }
1060 if (mask & IFCAP_VLAN_HWTAGGING) {
1061 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1062 reinit = 1;
1063 }
1052 if (mask & IFCAP_LRO) {
1064 if ((mask & IFCAP_LRO) && (igb_header_split)) {
1053 ifp->if_capenable ^= IFCAP_LRO;
1054 reinit = 1;
1055 }
1056 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1057 igb_init(adapter);
1058 VLAN_CAPABILITIES(ifp);
1059 break;
1060 }

--- 10 unchanged lines hidden ---

1071 default:
1072 error = ether_ioctl(ifp, command, data);
1073 break;
1074 }
1075
1076 return (error);
1077}
1078
1065 ifp->if_capenable ^= IFCAP_LRO;
1066 reinit = 1;
1067 }
1068 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1069 igb_init(adapter);
1070 VLAN_CAPABILITIES(ifp);
1071 break;
1072 }

--- 10 unchanged lines hidden ---

1083 default:
1084 error = ether_ioctl(ifp, command, data);
1085 break;
1086 }
1087
1088 return (error);
1089}
1090
1079/*********************************************************************
1080 * Watchdog timer:
1081 *
1082 * This routine is called from the local timer every second.
1083 * As long as transmit descriptors are being cleaned the value
1084 * is non-zero and we do nothing. Reaching 0 indicates a tx hang
1085 * and we then reset the device.
1086 *
1087 **********************************************************************/
1088
1091
1089static void
1090igb_watchdog(struct adapter *adapter)
1091{
1092 struct tx_ring *txr = adapter->tx_rings;
1093 bool tx_hang = FALSE;
1094
1095 IGB_CORE_LOCK_ASSERT(adapter);
1096
1097 /*
1098 ** The timer is set to 5 every time start() queues a packet.
1099 ** Then txeof keeps resetting it as long as it cleans at
1100 ** least one descriptor.
1101 ** Finally, anytime all descriptors are clean the timer is
1102 ** set to 0.
1103 **
1104 ** With TX Multiqueue we need to check every queue's timer,
1105 ** if any time out we do the reset.
1106 */
1107 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1108 IGB_TX_LOCK(txr);
1109 if (txr->watchdog_timer == 0 ||
1110 (--txr->watchdog_timer)) {
1111 IGB_TX_UNLOCK(txr);
1112 continue;
1113 } else {
1114 tx_hang = TRUE;
1115 IGB_TX_UNLOCK(txr);
1116 break;
1117 }
1118 }
1119 if (tx_hang == FALSE)
1120 return;
1121
1122 /* If we are in this routine because of pause frames, then
1123 * don't reset the hardware.
1124 */
1125 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1126 E1000_STATUS_TXOFF) {
1127 txr = adapter->tx_rings; /* reset pointer */
1128 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1129 IGB_TX_LOCK(txr);
1130 txr->watchdog_timer = IGB_TX_TIMEOUT;
1131 IGB_TX_UNLOCK(txr);
1132 }
1133 return;
1134 }
1135
1136 if (e1000_check_for_link(&adapter->hw) == 0)
1137 device_printf(adapter->dev, "watchdog timeout -- resetting\n");
1138
1139 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1140 device_printf(adapter->dev, "Queue(%d) tdh = %d, tdt = %d\n",
1141 i, E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
1142 E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
1143 device_printf(adapter->dev, "Queue(%d) desc avail = %d,"
1144 " Next Desc to Clean = %d\n", i, txr->tx_avail,
1145 txr->next_to_clean);
1146 }
1147
1148 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1149 adapter->watchdog_events++;
1150
1151 igb_init_locked(adapter);
1152}
1153
1154/*********************************************************************
1155 * Init entry point
1156 *
1157 * This routine is used in two ways. It is used by the stack as
1158 * init entry point in network interface structure. It is also used
1159 * by the driver as a hw/sw initialization routine to get to a
1160 * consistent state.
1161 *
1162 * return 0 on success, positive on failure
1163 **********************************************************************/
1164
1165static void
1166igb_init_locked(struct adapter *adapter)
1167{
1092/*********************************************************************
1093 * Init entry point
1094 *
1095 * This routine is used in two ways. It is used by the stack as
1096 * init entry point in network interface structure. It is also used
1097 * by the driver as a hw/sw initialization routine to get to a
1098 * consistent state.
1099 *
1100 * return 0 on success, positive on failure
1101 **********************************************************************/
1102
1103static void
1104igb_init_locked(struct adapter *adapter)
1105{
1168 struct rx_ring *rxr = adapter->rx_rings;
1169 struct tx_ring *txr = adapter->tx_rings;
1170 struct ifnet *ifp = adapter->ifp;
1171 device_t dev = adapter->dev;
1106 struct ifnet *ifp = adapter->ifp;
1107 device_t dev = adapter->dev;
1172 u32 pba = 0;
1173
1174 INIT_DEBUGOUT("igb_init: begin");
1175
1176 IGB_CORE_LOCK_ASSERT(adapter);
1177
1108
1109 INIT_DEBUGOUT("igb_init: begin");
1110
1111 IGB_CORE_LOCK_ASSERT(adapter);
1112
1178 igb_stop(adapter);
1113 igb_disable_intr(adapter);
1114 callout_stop(&adapter->timer);
1179
1115
1180 /*
1181 * Packet Buffer Allocation (PBA)
1182 * Writing PBA sets the receive portion of the buffer;
1183 * the remainder is used for the transmit buffer.
1184 */
1185 if (adapter->hw.mac.type == e1000_82575) {
1186 INIT_DEBUGOUT1("igb_init: pba=%dK",pba);
1187 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1188 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1189 }
1190
1191 /* Get the latest mac address, User can use a LAA */
1192 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1193 ETHER_ADDR_LEN);
1194
1195 /* Put the address into the Receive Address Array */
1196 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1197
1116 /* Get the latest mac address, User can use a LAA */
1117 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1118 ETHER_ADDR_LEN);
1119
1120 /* Put the address into the Receive Address Array */
1121 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1122
1198 /* Initialize the hardware */
1199 if (igb_hardware_init(adapter)) {
1200 device_printf(dev, "Unable to initialize the hardware\n");
1201 return;
1202 }
1123 igb_reset(adapter);
1203 igb_update_link_status(adapter);
1204
1124 igb_update_link_status(adapter);
1125
1205 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1206
1207 /* Set hardware offload abilities */
1208 ifp->if_hwassist = 0;
1209 if (ifp->if_capenable & IFCAP_TXCSUM) {
1210 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1211#if __FreeBSD_version >= 800000
1212 if (adapter->hw.mac.type == e1000_82576)
1213 ifp->if_hwassist |= CSUM_SCTP;
1214#endif

--- 19 unchanged lines hidden (view full) ---

1234 if (ifp->if_mtu > ETHERMTU)
1235 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1236 else
1237 adapter->rx_mbuf_sz = MCLBYTES;
1238
1239 /* Prepare receive descriptors and buffers */
1240 if (igb_setup_receive_structures(adapter)) {
1241 device_printf(dev, "Could not setup receive structures\n");
1126 /* Set hardware offload abilities */
1127 ifp->if_hwassist = 0;
1128 if (ifp->if_capenable & IFCAP_TXCSUM) {
1129 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1130#if __FreeBSD_version >= 800000
1131 if (adapter->hw.mac.type == e1000_82576)
1132 ifp->if_hwassist |= CSUM_SCTP;
1133#endif

--- 19 unchanged lines hidden (view full) ---

1153 if (ifp->if_mtu > ETHERMTU)
1154 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1155 else
1156 adapter->rx_mbuf_sz = MCLBYTES;
1157
1158 /* Prepare receive descriptors and buffers */
1159 if (igb_setup_receive_structures(adapter)) {
1160 device_printf(dev, "Could not setup receive structures\n");
1242 igb_stop(adapter);
1243 return;
1244 }
1245 igb_initialize_receive_units(adapter);
1246
1247 /* Don't lose promiscuous settings */
1248 igb_set_promisc(adapter);
1249
1250 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1251 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1252
1253 callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
1254 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1255
1256 if (adapter->msix > 1) /* Set up queue routing */
1257 igb_configure_queues(adapter);
1258
1259 /* Set up VLAN tag offload and filter */
1260 igb_setup_vlan_hw_support(adapter);
1261
1161 return;
1162 }
1163 igb_initialize_receive_units(adapter);
1164
1165 /* Don't lose promiscuous settings */
1166 igb_set_promisc(adapter);
1167
1168 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1169 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1170
1171 callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
1172 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1173
1174 if (adapter->msix > 1) /* Set up queue routing */
1175 igb_configure_queues(adapter);
1176
1177 /* Set up VLAN tag offload and filter */
1178 igb_setup_vlan_hw_support(adapter);
1179
1262 /* Set default RX interrupt moderation */
1263 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1264 E1000_WRITE_REG(&adapter->hw,
1265 E1000_EITR(rxr->msix), igb_ave_latency);
1266 rxr->eitr_setting = igb_ave_latency;
1267 }
1180 /* this clears any pending interrupts */
1181 E1000_READ_REG(&adapter->hw, E1000_ICR);
1182 igb_enable_intr(adapter);
1183 E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
1268
1184
1269 /* Set TX interrupt rate & reset TX watchdog */
1270 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1271 E1000_WRITE_REG(&adapter->hw,
1272 E1000_EITR(txr->msix), igb_ave_latency);
1273 txr->watchdog_timer = FALSE;
1274 }
1275
1276 {
1277 /* this clears any pending interrupts */
1278 E1000_READ_REG(&adapter->hw, E1000_ICR);
1279 igb_enable_intr(adapter);
1280 E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
1281 }
1282
1283 /* Don't reset the phy next time init gets called */
1284 adapter->hw.phy.reset_disable = TRUE;
1285}
1286
1287static void
1288igb_init(void *arg)
1289{
1290 struct adapter *adapter = arg;

--- 33 unchanged lines hidden ---

1324 igb_enable_intr(adapter);
1325}
1326
1327static void
1328igb_handle_rx(void *context, int pending)
1329{
1330 struct rx_ring *rxr = context;
1331 struct adapter *adapter = rxr->adapter;
1185 /* Don't reset the phy next time init gets called */
1186 adapter->hw.phy.reset_disable = TRUE;
1187}
1188
1189static void
1190igb_init(void *arg)
1191{
1192 struct adapter *adapter = arg;

--- 33 unchanged lines hidden ---

1226 igb_enable_intr(adapter);
1227}
1228
1229static void
1230igb_handle_rx(void *context, int pending)
1231{
1232 struct rx_ring *rxr = context;
1233 struct adapter *adapter = rxr->adapter;
1332 struct ifnet *ifp = adapter->ifp;
1234 u32 loop = IGB_MAX_LOOP;
1235 bool more;
1333
1236
1334 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1335 if (igb_rxeof(rxr, adapter->rx_process_limit) != 0)
1336 /* More to clean, schedule another task */
1337 taskqueue_enqueue(adapter->tq, &rxr->rx_task);
1237 do {
1238 more = igb_rxeof(rxr, -1);
1239 } while (loop-- && more);
1338
1240
1241 /* Reenable this interrupt */
1242 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
1339}
1340
1341static void
1342igb_handle_tx(void *context, int pending)
1343{
1344 struct tx_ring *txr = context;
1345 struct adapter *adapter = txr->adapter;
1243}
1244
1245static void
1246igb_handle_tx(void *context, int pending)
1247{
1248 struct tx_ring *txr = context;
1249 struct adapter *adapter = txr->adapter;
1346 struct ifnet *ifp = adapter->ifp;
1250 struct ifnet *ifp = adapter->ifp;
1251 u32 loop = IGB_MAX_LOOP;
1252 bool more;
1347
1253
1348 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1349 IGB_TX_LOCK(txr);
1350 igb_txeof(txr);
1254 IGB_TX_LOCK(txr);
1255 do {
1256 more = igb_txeof(txr);
1257 } while (loop-- && more);
1351#if __FreeBSD_version >= 800000
1258#if __FreeBSD_version >= 800000
1352 if (!drbr_empty(ifp, txr->br))
1353 igb_mq_start_locked(ifp, txr, NULL);
1259 if (!drbr_empty(ifp, txr->br))
1260 igb_mq_start_locked(ifp, txr, NULL);
1354#else
1261#else
1355 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1356 igb_start_locked(txr, ifp);
1262 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1263 igb_start_locked(txr, ifp);
1357#endif
1264#endif
1358 IGB_TX_UNLOCK(txr);
1359 }
1265 IGB_TX_UNLOCK(txr);
1266 /* Reenable this interrupt */
1267 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
1360}
1361
1362
1363/*********************************************************************
1364 *
1365 * MSI/Legacy Deferred
1366 * Interrupt Service routine
1367 *

--- 43 unchanged lines hidden ---

1411 * MSIX TX Interrupt Service routine
1412 *
1413 **********************************************************************/
1414static void
1415igb_msix_tx(void *arg)
1416{
1417 struct tx_ring *txr = arg;
1418 struct adapter *adapter = txr->adapter;
1268}
1269
1270
1271/*********************************************************************
1272 *
1273 * MSI/Legacy Deferred
1274 * Interrupt Service routine
1275 *

--- 43 unchanged lines hidden ---

1319 * MSIX TX Interrupt Service routine
1320 *
1321 **********************************************************************/
1322static void
1323igb_msix_tx(void *arg)
1324{
1325 struct tx_ring *txr = arg;
1326 struct adapter *adapter = txr->adapter;
1419 u32 loop = IGB_MAX_LOOP;
1420 bool more;
1421
1327 bool more;
1328
1422 ++txr->tx_irq;
1423 IGB_TX_LOCK(txr);
1329 E1000_WRITE_REG(&adapter->hw, E1000_EIMC, txr->eims);
1424
1330
1425 do {
1426 more = igb_txeof(txr);
1427 } while (loop-- && more);
1428
1331 IGB_TX_LOCK(txr);
1332 ++txr->tx_irq;
1333 more = igb_txeof(txr);
1429 IGB_TX_UNLOCK(txr);
1430
1334 IGB_TX_UNLOCK(txr);
1335
1431 /* Schedule a clean task */
1432 taskqueue_enqueue(adapter->tq, &txr->tx_task);
1433
1434 /* Reenable this interrupt */
1435 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
1336 /* Schedule a clean task if needed */
1337 if (more)
1338 taskqueue_enqueue(txr->tq, &txr->tx_task);
1339 else
1340 /* Reenable this interrupt */
1341 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
1436 return;
1437}
1438
1439/*********************************************************************
1440 *
1441 * MSIX RX Interrupt Service routine
1442 *
1443 **********************************************************************/
1444
1445static void
1446igb_msix_rx(void *arg)
1447{
1448 struct rx_ring *rxr = arg;
1449 struct adapter *adapter = rxr->adapter;
1342 return;
1343}
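The reworked handler follows the usual mask/poll/re-arm discipline: the vector is silenced with an EIMC write on entry, igb_txeof() runs once under the ring lock, and the handler then either defers the remaining work to the ring's taskqueue (when more descriptors are pending) or re-arms the vector through EIMS. igb_msix_rx() below applies the same pattern around igb_rxeof().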
1344
1345/*********************************************************************
1346 *
1347 * MSIX RX Interrupt Service routine
1348 *
1349 **********************************************************************/
1350
1351static void
1352igb_msix_rx(void *arg)
1353{
1354 struct rx_ring *rxr = arg;
1355 struct adapter *adapter = rxr->adapter;
1450 u32 loop = IGB_MAX_LOOP;
1451 bool more;
1452
1356 bool more;
1357
1358 E1000_WRITE_REG(&adapter->hw, E1000_EIMC, rxr->eims);
1359
1453 ++rxr->rx_irq;
1360 ++rxr->rx_irq;
1454 do {
1455 more = igb_rxeof(rxr, adapter->rx_process_limit);
1456 } while (loop-- && more);
1361 more = igb_rxeof(rxr, adapter->rx_process_limit);
1457
1458 /* Update interrupt rate */
1459 if (igb_enable_aim == TRUE)
1460 igb_update_aim(rxr);
1461
1462 /* Schedule another clean */
1362
1363 /* Update interrupt rate */
1364 if (igb_enable_aim == TRUE)
1365 igb_update_aim(rxr);
1366
1367 /* Schedule another clean */
1463 taskqueue_enqueue(adapter->tq, &rxr->rx_task);
1464
1465 /* Reenable this interrupt */
1466 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
1368 if (more)
1369 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1370 else
1371 /* Reenable this interrupt */
1372 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
1467 return;
1468}
1469
1470
1471/*********************************************************************
1472 *
1473 * MSIX Link Interrupt Service routine
1474 *

--- 56 unchanged lines hidden ---

1531 } else if (olditr == igb_bulk_latency) {
1532 if (rxr->bytes < BULK_THRESHOLD)
1533 newitr = igb_ave_latency;
1534 }
1535
1536 if (olditr != newitr) {
1537 /* Change interrupt rate */
1538 rxr->eitr_setting = newitr;
1373 return;
1374}
1375
1376
1377/*********************************************************************
1378 *
1379 * MSIX Link Interrupt Service routine
1380 *

--- 56 unchanged lines hidden ---

1437 } else if (olditr == igb_bulk_latency) {
1438 if (rxr->bytes < BULK_THRESHOLD)
1439 newitr = igb_ave_latency;
1440 }
1441
1442 if (olditr != newitr) {
1443 /* Change interrupt rate */
1444 rxr->eitr_setting = newitr;
1539 if (adapter->hw.mac.type == e1000_82575)
1540 newitr |= newitr << 16;
1541 else
1542 newitr |= 0x8000000;
1543 E1000_WRITE_REG(&adapter->hw, E1000_EITR(rxr->me), newitr);
1445 E1000_WRITE_REG(&adapter->hw, E1000_EITR(rxr->me),
1446 newitr | (newitr << 16));
1544 }
1545
1546 rxr->bytes = 0;
1547 return;
1548}
1549
1550
1551/*********************************************************************

--- 232 unchanged lines hidden ---

1784 /* This is changing soon to an mtag detection */
1785 if (we detect this mbuf has a TSTAMP mtag)
1786 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
1787#endif
1788 /* Calculate payload length */
1789 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
1790 << E1000_ADVTXD_PAYLEN_SHIFT);
1791
1447 }
1448
1449 rxr->bytes = 0;
1450 return;
1451}
1452
1453
1454/*********************************************************************

--- 232 unchanged lines hidden ---

1687 /* This is changing soon to an mtag detection */
1688 /* if (we detect this mbuf has a TSTAMP mtag)
1689 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP; */
1690#endif
1691 /* Calculate payload length */
1692 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
1693 << E1000_ADVTXD_PAYLEN_SHIFT);
1694
1695 /* 82575 needs the queue index added */
1696 if (adapter->hw.mac.type == e1000_82575)
1697 olinfo_status |= txr->me << 4;
1698
1792 /* Set up our transmit descriptors */
1793 i = txr->next_avail_desc;
1794 for (j = 0; j < nsegs; j++) {
1795 bus_size_t seg_len;
1796 bus_addr_t seg_addr;
1797
1798 tx_buffer = &txr->tx_buffers[i];
1799 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];

--- 91 unchanged lines hidden ---

1891 struct ifmultiaddr *ifma;
1892 u32 reg_rctl = 0;
1893 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_ADDR_LEN];
1894
1895 int mcnt = 0;
1896
1897 IOCTL_DEBUGOUT("igb_set_multi: begin");
1898
1699 /* Set up our transmit descriptors */
1700 i = txr->next_avail_desc;
1701 for (j = 0; j < nsegs; j++) {
1702 bus_size_t seg_len;
1703 bus_addr_t seg_addr;
1704
1705 tx_buffer = &txr->tx_buffers[i];
1706 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];

--- 91 unchanged lines hidden ---

1798 struct ifmultiaddr *ifma;
1799 u32 reg_rctl = 0;
1800 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_ADDR_LEN];
1801
1802 int mcnt = 0;
1803
1804 IOCTL_DEBUGOUT("igb_set_multi: begin");
1805
1806#if __FreeBSD_version < 800000
1807 IF_ADDR_LOCK(ifp);
1808#else
1899 if_maddr_rlock(ifp);
1809 if_maddr_rlock(ifp);
1810#endif
1900 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1901 if (ifma->ifma_addr->sa_family != AF_LINK)
1902 continue;
1903
1904 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1905 break;
1906
1907 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1908 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1909 mcnt++;
1910 }
1811 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1812 if (ifma->ifma_addr->sa_family != AF_LINK)
1813 continue;
1814
1815 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1816 break;
1817
1818 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1819 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1820 mcnt++;
1821 }
1822#if __FreeBSD_version < 800000
1823 IF_ADDR_UNLOCK(ifp);
1824#else
1911 if_maddr_runlock(ifp);
1825 if_maddr_runlock(ifp);
1912
1826#endif
1913 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1914 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1915 reg_rctl |= E1000_RCTL_MPE;
1916 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1917 } else
1918 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
1919}
1920
1921
1922/*********************************************************************
1827 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1828 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1829 reg_rctl |= E1000_RCTL_MPE;
1830 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1831 } else
1832 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
1833}
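The new #if guard spans an API change: FreeBSD 8 replaced the IF_ADDR_LOCK()/IF_ADDR_UNLOCK() macros with if_maddr_rlock()/if_maddr_runlock() for protecting if_multiaddrs traversal, so the version test keeps this multicast walk buildable on both 7.x and 8.x.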
1834
1835
1836/*********************************************************************
1923 * Timer routine
1837 * Timer routine:
1838 * This routine checks for link status,
1839 * updates statistics, and does the watchdog.
1924 *
1840 *
1925 * This routine checks for link status and updates statistics.
1926 *
1927 **********************************************************************/
1928
1929static void
1930igb_local_timer(void *arg)
1931{
1841 **********************************************************************/
1842
1843static void
1844igb_local_timer(void *arg)
1845{
1932 struct adapter *adapter = arg;
1933 struct ifnet *ifp = adapter->ifp;
1846 struct adapter *adapter = arg;
1847 struct ifnet *ifp = adapter->ifp;
1848 device_t dev = adapter->dev;
1849 struct tx_ring *txr = adapter->tx_rings;
1934
1850
1851
1935 IGB_CORE_LOCK_ASSERT(adapter);
1936
1937 igb_update_link_status(adapter);
1938 igb_update_stats_counters(adapter);
1939
1940 if (igb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
1941 igb_print_hw_stats(adapter);
1942
1852 IGB_CORE_LOCK_ASSERT(adapter);
1853
1854 igb_update_link_status(adapter);
1855 igb_update_stats_counters(adapter);
1856
1857 if (igb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
1858 igb_print_hw_stats(adapter);
1859
1943 /*
1944 * Each second we check the watchdog to
1945 * protect against hardware hangs.
1946 */
1947 igb_watchdog(adapter);
1860 /*
1861 ** Watchdog: check for time since any descriptor was cleaned
1862 */
1863 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1864 if (txr->watchdog_check == FALSE)
1865 continue;
1866 if ((ticks - txr->watchdog_time) > IGB_WATCHDOG)
1867 goto timeout;
1868 }
1948
1949 /* Trigger an RX interrupt on all queues */
1950 E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->rx_mask);
1869
1870 /* Trigger an RX interrupt on all queues */
1871 E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->rx_mask);
1951
1952 callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
1872 callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
1873 return;
1953
1874
1875timeout:
1876 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1877 device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1878 E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
1879 E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
1880 device_printf(dev, "TX(%d) desc avail = %d, "
1881 "Next TX to Clean = %d\n",
1882 txr->me, txr->tx_avail, txr->next_to_clean);
1883 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1884 adapter->watchdog_events++;
1885 igb_init_locked(adapter);
1954}
1955
1956static void
1957igb_update_link_status(struct adapter *adapter)
1958{
1959 struct e1000_hw *hw = &adapter->hw;
1960 struct ifnet *ifp = adapter->ifp;
1961 device_t dev = adapter->dev;

--- 40 unchanged lines hidden ---

2002 ifp->if_baudrate = adapter->link_speed = 0;
2003 adapter->link_duplex = 0;
2004 if (bootverbose)
2005 device_printf(dev, "Link is Down\n");
2006 adapter->link_active = 0;
2007 if_link_state_change(ifp, LINK_STATE_DOWN);
2008 /* Turn off watchdogs */
2009 for (int i = 0; i < adapter->num_queues; i++, txr++)
1886}
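Note the watchdog rework visible in this function: the old per-ring countdown (watchdog_timer, reset to IGB_TX_TIMEOUT on each transmit and decremented every second by the now-removed igb_watchdog()) becomes a watchdog_check flag plus a watchdog_time stamp compared against ticks. Presumably watchdog_time is refreshed wherever descriptors are cleaned (that code falls in a hidden hunk), so a hang is declared only when more than IGB_WATCHDOG ticks elapse with the flag still set.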
1887
1888static void
1889igb_update_link_status(struct adapter *adapter)
1890{
1891 struct e1000_hw *hw = &adapter->hw;
1892 struct ifnet *ifp = adapter->ifp;
1893 device_t dev = adapter->dev;

--- 40 unchanged lines hidden ---

1934 ifp->if_baudrate = adapter->link_speed = 0;
1935 adapter->link_duplex = 0;
1936 if (bootverbose)
1937 device_printf(dev, "Link is Down\n");
1938 adapter->link_active = 0;
1939 if_link_state_change(ifp, LINK_STATE_DOWN);
1940 /* Turn off watchdogs */
1941 for (int i = 0; i < adapter->num_queues; i++, txr++)
2010 txr->watchdog_timer = FALSE;
1942 txr->watchdog_check = FALSE;
2011 }
2012}
2013
2014/*********************************************************************
2015 *
2016 * This routine disables all traffic on the adapter by issuing a
2017 * global reset on the MAC and deallocates TX/RX buffers.
2018 *

--- 168 unchanged lines hidden ---

2187 error = bus_setup_intr(dev, txr->res,
2188 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2189 igb_msix_tx, txr, &txr->tag);
2190 if (error) {
2191 txr->res = NULL;
2192 device_printf(dev, "Failed to register TX handler");
2193 return (error);
2194 }
1943 }
1944}
1945
1946/*********************************************************************
1947 *
1948 * This routine disables all traffic on the adapter by issuing a
1949 * global reset on the MAC and deallocates TX/RX buffers.
1950 *

--- 168 unchanged lines hidden ---

2119 error = bus_setup_intr(dev, txr->res,
2120 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2121 igb_msix_tx, txr, &txr->tag);
2122 if (error) {
2123 txr->res = NULL;
2124 device_printf(dev, "Failed to register TX handler");
2125 return (error);
2126 }
2195 /* Make tasklet for deferred handling - one per queue */
2196 TASK_INIT(&txr->tx_task, 0, igb_handle_tx, txr);
2197 txr->msix = vector;
2198 if (adapter->hw.mac.type == e1000_82575)
2199 txr->eims = E1000_EICR_TX_QUEUE0 << i;
2200 else
2201 txr->eims = 1 << vector;
2202 /*
2203 ** Bind the msix vector, and thus the
2204 ** ring to the corresponding cpu.
2205 */
2206 if (adapter->num_queues > 1)
2207 bus_bind_intr(dev, txr->res, i);
2127 txr->msix = vector;
2128 if (adapter->hw.mac.type == e1000_82575)
2129 txr->eims = E1000_EICR_TX_QUEUE0 << i;
2130 else
2131 txr->eims = 1 << vector;
2132 /*
2133 ** Bind the msix vector, and thus the
2134 ** ring to the corresponding cpu.
2135 */
2136 if (adapter->num_queues > 1)
2137 bus_bind_intr(dev, txr->res, i);
2138 /* Make tasklet for deferred handling - one per queue */
2139 TASK_INIT(&txr->tx_task, 0, igb_handle_tx, txr);
2140 txr->tq = taskqueue_create_fast("igb_txq", M_NOWAIT,
2141 taskqueue_thread_enqueue, &txr->tq);
2142 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2143 device_get_nameunit(adapter->dev));
2208 }
2209
2210 /* RX Setup */
2211 for (int i = 0; i < adapter->num_queues; i++, vector++, rxr++) {
2212 rid = vector +1;
2213 rxr->res = bus_alloc_resource_any(dev,
2214 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2215 if (rxr->res == NULL) {

--- 5 unchanged lines hidden (view full) ---

2221 error = bus_setup_intr(dev, rxr->res,
2222 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2223 igb_msix_rx, rxr, &rxr->tag);
2224 if (error) {
2225 rxr->res = NULL;
2226 device_printf(dev, "Failed to register RX handler");
2227 return (error);
2228 }
2144 }
2145
2146 /* RX Setup */
2147 for (int i = 0; i < adapter->num_queues; i++, vector++, rxr++) {
2148 rid = vector +1;
2149 rxr->res = bus_alloc_resource_any(dev,
2150 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2151 if (rxr->res == NULL) {

--- 5 unchanged lines hidden (view full) ---

2157 error = bus_setup_intr(dev, rxr->res,
2158 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2159 igb_msix_rx, rxr, &rxr->tag);
2160 if (error) {
2161 rxr->res = NULL;
2162 device_printf(dev, "Failed to register RX handler");
2163 return (error);
2164 }
2229 /* Make tasklet for deferred handling - one per queue */
2230 TASK_INIT(&rxr->rx_task, 0, igb_handle_rx, rxr);
2231 rxr->msix = vector;
2232 if (adapter->hw.mac.type == e1000_82575)
2233 rxr->eims = E1000_EICR_RX_QUEUE0 << i;
2234 else
2235 rxr->eims = 1 << vector;
2236 /* Get a mask for local timer */
2237 adapter->rx_mask |= rxr->eims;
2238 /*
2239 ** Bind the msix vector, and thus the
2240 ** ring to the corresponding cpu.
2241 ** Notice that this makes an RX/TX pair
2242 ** bound to each CPU, limited by the MSIX
2243 ** vectors.
2244 */
2245 if (adapter->num_queues > 1)
2246 bus_bind_intr(dev, rxr->res, i);
2165 rxr->msix = vector;
2166 if (adapter->hw.mac.type == e1000_82575)
2167 rxr->eims = E1000_EICR_RX_QUEUE0 << i;
2168 else
2169 rxr->eims = 1 << vector;
2170 /* Get a mask for local timer */
2171 adapter->rx_mask |= rxr->eims;
2172 /*
2173 ** Bind the msix vector, and thus the
2174 ** ring to the corresponding cpu.
2175 ** Notice that this makes an RX/TX pair
2176 ** bound to each CPU, limited by the MSIX
2177 ** vectors.
2178 */
2179 if (adapter->num_queues > 1)
2180 bus_bind_intr(dev, rxr->res, i);
2181
2182 /* Make tasklet for deferred handling - one per queue */
2183 TASK_INIT(&rxr->rx_task, 0, igb_handle_rx, rxr);
2184 rxr->tq = taskqueue_create_fast("igb_rxq", M_NOWAIT,
2185 taskqueue_thread_enqueue, &rxr->tq);
2186 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2187 device_get_nameunit(adapter->dev));
2247 }
2248
2249 /* And Link */
2250 rid = vector +1;
2251 adapter->res = bus_alloc_resource_any(dev,
2252 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2253 if (adapter->res == NULL) {
2254 device_printf(dev,
2255 "Unable to allocate bus resource: "
2256 "MSIX Link Interrupt\n");
2257 return (ENXIO);
2258 }
2259 if ((error = bus_setup_intr(dev, adapter->res,
2260 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2261 igb_msix_link, adapter, &adapter->tag)) != 0) {
2262 device_printf(dev, "Failed to register Link handler");
2263 return (error);
2264 }
2265 adapter->linkvec = vector;
2188 }
2189
2190 /* And Link */
2191 rid = vector +1;
2192 adapter->res = bus_alloc_resource_any(dev,
2193 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2194 if (adapter->res == NULL) {
2195 device_printf(dev,
2196 "Unable to allocate bus resource: "
2197 "MSIX Link Interrupt\n");
2198 return (ENXIO);
2199 }
2200 if ((error = bus_setup_intr(dev, adapter->res,
2201 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2202 igb_msix_link, adapter, &adapter->tag)) != 0) {
2203 device_printf(dev, "Failed to register Link handler");
2204 return (error);
2205 }
2206 adapter->linkvec = vector;
2266 adapter->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
2267 taskqueue_thread_enqueue, &adapter->tq);
2268 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2269 device_get_nameunit(adapter->dev));
2270
2271 return (0);
2272}
2273
2274
2275static void
2276igb_configure_queues(struct adapter *adapter)
2277{
2278 struct e1000_hw *hw = &adapter->hw;
2279 struct tx_ring *txr;
2280 struct rx_ring *rxr;
2207
2208 return (0);
2209}
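Also worth noting in this hunk: deferred work moves off the single adapter-wide taskqueue (the old adapter->tq, which the deleted lines created after the link vector) onto a fast taskqueue per ring (txr->tq / rxr->tq). Each queue's clean task therefore gets its own thread, started alongside the MSI-X vector that was just bound to a CPU.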
2210
2211
2212static void
2213igb_configure_queues(struct adapter *adapter)
2214{
2215 struct e1000_hw *hw = &adapter->hw;
2216 struct tx_ring *txr;
2217 struct rx_ring *rxr;
2218 u32 tmp, ivar = 0;
2281
2219
2282 /* Turn on MSIX */
2283 /*
2284 ** 82576 uses IVARs to route MSI/X
2285 ** interrupts, it's not very intuitive,
2286 ** study the code carefully :)
2287 */
2288 if (adapter->hw.mac.type == e1000_82576) {
2289 u32 ivar = 0;
2290 /* First turn on the capability */
2220 /* First turn on RSS capability */
2221 if (adapter->hw.mac.type > e1000_82575)
2291 E1000_WRITE_REG(hw, E1000_GPIE,
2222 E1000_WRITE_REG(hw, E1000_GPIE,
2292 E1000_GPIE_MSIX_MODE |
2293 E1000_GPIE_EIAME |
2223 E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
2294 E1000_GPIE_PBA | E1000_GPIE_NSICR);
2224 E1000_GPIE_PBA | E1000_GPIE_NSICR);
2225
2226 /* Turn on MSIX */
2227 switch (adapter->hw.mac.type) {
2228 case e1000_82580:
2295 /* RX */
2296 for (int i = 0; i < adapter->num_queues; i++) {
2229 /* RX */
2230 for (int i = 0; i < adapter->num_queues; i++) {
2231 u32 index = i >> 1;
2232 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2233 rxr = &adapter->rx_rings[i];
2234 if (i & 1) {
2235 ivar &= 0xFF00FFFF;
2236 ivar |= (rxr->msix | E1000_IVAR_VALID) << 16;
2237 } else {
2238 ivar &= 0xFFFFFF00;
2239 ivar |= rxr->msix | E1000_IVAR_VALID;
2240 }
2241 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2242 adapter->eims_mask |= rxr->eims;
2243 }
2244 /* TX */
2245 for (int i = 0; i < adapter->num_queues; i++) {
2246 u32 index = i >> 1;
2247 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2248 txr = &adapter->tx_rings[i];
2249 if (i & 1) {
2250 ivar &= 0x00FFFFFF;
2251 ivar |= (txr->msix | E1000_IVAR_VALID) << 24;
2252 } else {
2253 ivar &= 0xFFFF00FF;
2254 ivar |= (txr->msix | E1000_IVAR_VALID) << 8;
2255 }
2256 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2257 adapter->eims_mask |= txr->eims;
2258 }
2259
2260 /* And for the link interrupt */
2261 ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
2262 adapter->link_mask = 1 << adapter->linkvec;
2263 adapter->eims_mask |= adapter->link_mask;
2264 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
2265 break;
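A worked example of the IVAR packing implied by the masks above (82580 case):

	/*
	 * Queues 2k and 2k+1 share IVAR(k), one byte per interrupt cause:
	 *   bits  7:0  = RX queue 2k       bits 15:8  = TX queue 2k
	 *   bits 23:16 = RX queue 2k+1     bits 31:24 = TX queue 2k+1
	 * Each byte holds (msix vector | E1000_IVAR_VALID); e.g. with four
	 * queues, RX3 and TX3 land in bytes 2 and 3 of IVAR(1).
	 */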
2266 case e1000_82576:
2267 /* RX */
2268 for (int i = 0; i < adapter->num_queues; i++) {
2297 u32 index = i & 0x7; /* Each IVAR has two entries */
2298 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2299 rxr = &adapter->rx_rings[i];
2300 if (i < 8) {
2301 ivar &= 0xFFFFFF00;
2302 ivar |= rxr->msix | E1000_IVAR_VALID;
2303 } else {
2304 ivar &= 0xFF00FFFF;

--- 18 unchanged lines hidden ---

2323 adapter->eims_mask |= txr->eims;
2324 }
2325
2326 /* And for the link interrupt */
2327 ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
2328 adapter->link_mask = 1 << adapter->linkvec;
2329 adapter->eims_mask |= adapter->link_mask;
2330 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
2269 u32 index = i & 0x7; /* Each IVAR has two entries */
2270 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
2271 rxr = &adapter->rx_rings[i];
2272 if (i < 8) {
2273 ivar &= 0xFFFFFF00;
2274 ivar |= rxr->msix | E1000_IVAR_VALID;
2275 } else {
2276 ivar &= 0xFF00FFFF;

--- 18 unchanged lines hidden ---

2295 adapter->eims_mask |= txr->eims;
2296 }
2297
2298 /* And for the link interrupt */
2299 ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
2300 adapter->link_mask = 1 << adapter->linkvec;
2301 adapter->eims_mask |= adapter->link_mask;
2302 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
2331 } else
2332 { /* 82575 */
2333 int tmp;
2303 break;
2334
2304
2335 /* enable MSI-X PBA support*/
2305 case e1000_82575:
2306 /* enable MSI-X support*/
2336 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
2337 tmp |= E1000_CTRL_EXT_PBA_CLR;
2338 /* Auto-Mask interrupts upon ICR read. */
2339 tmp |= E1000_CTRL_EXT_EIAME;
2340 tmp |= E1000_CTRL_EXT_IRCA;
2341 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
2342
2343 /* TX */

--- 12 unchanged lines hidden ---

2356 adapter->eims_mask |= rxr->eims;
2357 }
2358
2359 /* Link */
2360 E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
2361 E1000_EIMS_OTHER);
2362 adapter->link_mask |= E1000_EIMS_OTHER;
2363 adapter->eims_mask |= adapter->link_mask;
2307 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
2308 tmp |= E1000_CTRL_EXT_PBA_CLR;
2309 /* Auto-Mask interrupts upon ICR read. */
2310 tmp |= E1000_CTRL_EXT_EIAME;
2311 tmp |= E1000_CTRL_EXT_IRCA;
2312 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
2313
2314 /* TX */

--- 12 unchanged lines hidden ---

2327 adapter->eims_mask |= rxr->eims;
2328 }
2329
2330 /* Link */
2331 E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
2332 E1000_EIMS_OTHER);
2333 adapter->link_mask |= E1000_EIMS_OTHER;
2334 adapter->eims_mask |= adapter->link_mask;
2335 default:
2336 break;
2364 }
2337 }
2338
2365 return;
2366}
2367
2368
2369static void
2370igb_free_pci_resources(struct adapter *adapter)
2371{
2372 struct tx_ring *txr = adapter->tx_rings;

--- 66 unchanged lines hidden ---

2439 * Setup Either MSI/X or MSI
2440 */
2441static int
2442igb_setup_msix(struct adapter *adapter)
2443{
2444 device_t dev = adapter->dev;
2445 int rid, want, queues, msgs;
2446
2339 return;
2340}
2341
2342
2343static void
2344igb_free_pci_resources(struct adapter *adapter)
2345{
2346 struct tx_ring *txr = adapter->tx_rings;

--- 66 unchanged lines hidden ---

2413 * Setup Either MSI/X or MSI
2414 */
2415static int
2416igb_setup_msix(struct adapter *adapter)
2417{
2418 device_t dev = adapter->dev;
2419 int rid, want, queues, msgs;
2420
2421 /* tuneable override */
2422 if (igb_enable_msix == 0)
2423 goto msi;
2424
2447 /* First try MSI/X */
2448 rid = PCIR_BAR(IGB_MSIX_BAR);
2449 adapter->msix_mem = bus_alloc_resource_any(dev,
2450 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2451 if (!adapter->msix_mem) {
2452 /* May not be enabled */
2453 device_printf(adapter->dev,
2454 "Unable to map MSIX table \n");

--- 37 unchanged lines hidden ---

2492 msgs = pci_msi_count(dev);
2493 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2494 device_printf(adapter->dev,"Using MSI interrupt\n");
2495 return (msgs);
2496}
2497
2498/*********************************************************************
2499 *
2425 /* First try MSI/X */
2426 rid = PCIR_BAR(IGB_MSIX_BAR);
2427 adapter->msix_mem = bus_alloc_resource_any(dev,
2428 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2429 if (!adapter->msix_mem) {
2430 /* May not be enabled */
2431 device_printf(adapter->dev,
2432 "Unable to map MSIX table \n");

--- 37 unchanged lines hidden ---

2470 msgs = pci_msi_count(dev);
2471 if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2472 device_printf(adapter->dev,"Using MSI interrupt\n");
2473 return (msgs);
2474}
2475
2476/*********************************************************************
2477 *
2500 * Initialize the hardware to a configuration
2501 * as specified by the adapter structure.
2478 * Set up a fresh starting state
2502 *
2503 **********************************************************************/
2479 *
2480 **********************************************************************/
2504static int
2505igb_hardware_init(struct adapter *adapter)
2481static void
2482igb_reset(struct adapter *adapter)
2506{
2507 device_t dev = adapter->dev;
2483{
2484 device_t dev = adapter->dev;
2508 u32 rx_buffer_size;
2485 struct e1000_hw *hw = &adapter->hw;
2486 struct e1000_fc_info *fc = &hw->fc;
2487 struct ifnet *ifp = adapter->ifp;
2488 u32 pba = 0;
2489 u16 hwm;
2509
2490
2510 INIT_DEBUGOUT("igb_hardware_init: begin");
2491 INIT_DEBUGOUT("igb_reset: begin");
2511
2492
2512 /* Issue a global reset */
2513 e1000_reset_hw(&adapter->hw);
2514
2515 /* Let the firmware know the OS is in control */
2516 igb_get_hw_control(adapter);
2517
2518 /*
2493 /* Let the firmware know the OS is in control */
2494 igb_get_hw_control(adapter);
2495
2496 /*
2497 * Packet Buffer Allocation (PBA)
2498 * Writing PBA sets the receive portion of the buffer
2499 * the remainder is used for the transmit buffer.
2500 */
2501 switch (hw->mac.type) {
2502 case e1000_82575:
2503 pba = E1000_PBA_32K;
2504 break;
2505 case e1000_82576:
2506 pba = E1000_PBA_64K;
2507 break;
2508 case e1000_82580:
2509 pba = E1000_PBA_35K;
2510 default:
2511 break;
2512 }
2513
2514 /* Special needs in case of Jumbo frames */
2515 if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
2516 u32 tx_space, min_tx, min_rx;
2517 pba = E1000_READ_REG(hw, E1000_PBA);
2518 tx_space = pba >> 16;
2519 pba &= 0xffff;
2520 min_tx = (adapter->max_frame_size +
2521 sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
2522 min_tx = roundup2(min_tx, 1024);
2523 min_tx >>= 10;
2524 min_rx = adapter->max_frame_size;
2525 min_rx = roundup2(min_rx, 1024);
2526 min_rx >>= 10;
2527 if (tx_space < min_tx &&
2528 ((min_tx - tx_space) < pba)) {
2529 pba = pba - (min_tx - tx_space);
2530 /*
2531 * if short on rx space, rx wins
2532 * and must trump tx adjustment
2533 */
2534 if (pba < min_rx)
2535 pba = min_rx;
2536 }
2537 E1000_WRITE_REG(hw, E1000_PBA, pba);
2538 }
2539
2540 INIT_DEBUGOUT1("igb_init: pba=%dK",pba);
2541
2542 /*
2519 * These parameters control the automatic generation (Tx) and
2520 * response (Rx) to Ethernet PAUSE frames.
2521 * - High water mark should allow for at least two frames to be
2522 * received after sending an XOFF.
2523 * - Low water mark works best when it is very near the high water mark.
2524 * This allows the receiver to restart by sending XON when it has
2543 * These parameters control the automatic generation (Tx) and
2544 * response (Rx) to Ethernet PAUSE frames.
2545 * - High water mark should allow for at least two frames to be
2546 * received after sending an XOFF.
2547 * - Low water mark works best when it is very near the high water mark.
2548 * This allows the receiver to restart by sending XON when it has
2525 * drained a bit. Here we use an arbitary value of 1500 which will
2526 * restart after one full frame is pulled from the buffer. There
2527 * could be several smaller frames in the buffer and if so they will
2528 * not trigger the XON until their total number reduces the buffer
2529 * by 1500.
2530 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2549 * drained a bit.
2531 */
2550 */
2532 if (adapter->hw.mac.type == e1000_82576)
2533 rx_buffer_size = ((E1000_READ_REG(&adapter->hw,
2534 E1000_RXPBS) & 0xffff) << 10 );
2535 else
2536 rx_buffer_size = ((E1000_READ_REG(&adapter->hw,
2537 E1000_PBA) & 0xffff) << 10 );
2551 hwm = min(((pba << 10) * 9 / 10),
2552 ((pba << 10) - 2 * adapter->max_frame_size));
2538
2553
2539 adapter->hw.fc.high_water = rx_buffer_size -
2540 roundup2(adapter->max_frame_size, 1024);
2541 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2554 if (hw->mac.type < e1000_82576) {
2555 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
2556 fc->low_water = fc->high_water - 8;
2557 } else {
2558 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
2559 fc->low_water = fc->high_water - 16;
2560 }
2542
2561
2543 adapter->hw.fc.pause_time = IGB_FC_PAUSE_TIME;
2544 adapter->hw.fc.send_xon = TRUE;
2562 fc->pause_time = IGB_FC_PAUSE_TIME;
2563 fc->send_xon = TRUE;
2545
2546 /* Set Flow control, use the tunable location if sane */
2547 if ((igb_fc_setting >= 0) || (igb_fc_setting < 4))
2564
2565 /* Set Flow control, use the tunable location if sane */
2566 if ((igb_fc_setting >= 0) || (igb_fc_setting < 4))
2548 adapter->hw.fc.requested_mode = igb_fc_setting;
2567 fc->requested_mode = igb_fc_setting;
2549 else
2568 else
2550 adapter->hw.fc.requested_mode = e1000_fc_none;
2569 fc->requested_mode = e1000_fc_none;
2551
2570
2552 if (e1000_init_hw(&adapter->hw) < 0) {
2571 fc->current_mode = fc->requested_mode;
2572
2573 /* Issue a global reset */
2574 e1000_reset_hw(hw);
2575 E1000_WRITE_REG(hw, E1000_WUC, 0);
2576
2577 if (e1000_init_hw(hw) < 0)
2553 device_printf(dev, "Hardware Initialization Failed\n");
2578 device_printf(dev, "Hardware Initialization Failed\n");
2554 return (EIO);
2555 }
2556
2579
2557 e1000_check_for_link(&adapter->hw);
2580 if (hw->mac.type == e1000_82580) {
2581 u32 reg;
2558
2582
2559 return (0);
2583 hwm = (pba << 10) - (2 * adapter->max_frame_size);
2584 /*
2585 * 0x80000000 - enable DMA COAL
2586 * 0x10000000 - use L0s as low power
2587 * 0x20000000 - use L1 as low power
2588 * X << 16 - exit dma coal when rx data exceeds X kB
2589 * Y - upper limit to stay in dma coal in units of 32usecs
2590 */
2591 E1000_WRITE_REG(hw, E1000_DMACR,
2592 0xA0000006 | ((hwm << 6) & 0x00FF0000));
2593
2594 /* set hwm to PBA - 2 * max frame size */
2595 E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
2596 /*
2597 * This sets the time to wait before requesting transition to
2598 * low power state to number of usecs needed to receive 1 512
2599 * byte frame at gigabit line rate
2600 */
2601 E1000_WRITE_REG(hw, E1000_DMCTLX, 4);
2602
2603 /* free space in tx packet buffer to wake from DMA coal */
2604 E1000_WRITE_REG(hw, E1000_DMCTXTH,
2605 (20480 - (2 * adapter->max_frame_size)) >> 6);
2606
2607 /* make low power state decision controlled by DMA coal */
2608 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
2609 E1000_WRITE_REG(hw, E1000_PCIEMISC,
2610 reg | E1000_PCIEMISC_LX_DECISION);
2611 }
2612
2613 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
2614 e1000_get_phy_info(hw);
2615 e1000_check_for_link(hw);
2616 return;
2560}
2561
2562/*********************************************************************
2563 *
2564 * Setup networking device structure and register an interface.
2565 *
2566 **********************************************************************/
2567static void

--- 12 unchanged lines hidden (view full) ---

2580 ifp->if_softc = adapter;
2581 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2582 ifp->if_ioctl = igb_ioctl;
2583 ifp->if_start = igb_start;
2584#if __FreeBSD_version >= 800000
2585 ifp->if_transmit = igb_mq_start;
2586 ifp->if_qflush = igb_qflush;
2587#endif
2617}
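To make the watermark arithmetic in igb_reset() concrete, here is the calculation worked through for an assumed 82576 (pba = E1000_PBA_64K, i.e. 64 KB of RX packet buffer) and a 1522-byte max frame; the numbers are illustrative, not from either revision:

/*
 *   pba << 10                  = 65536  bytes of RX packet buffer
 *   90% of the buffer          = 58982
 *   buffer - 2 max frames      = 65536 - 2*1522 = 62492
 *   hwm = min(58982, 62492)    = 58982
 *   high_water = 58982 & 0xFFF0 = 58976  (16-byte granularity, 82576+)
 *   low_water  = 58976 - 16     = 58960
 */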
2618
2619/*********************************************************************
2620 *
2621 * Setup networking device structure and register an interface.
2622 *
2623 **********************************************************************/
2624static void

--- 12 unchanged lines hidden (view full) ---

2637 ifp->if_softc = adapter;
2638 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2639 ifp->if_ioctl = igb_ioctl;
2640 ifp->if_start = igb_start;
2641#if __FreeBSD_version >= 800000
2642 ifp->if_transmit = igb_mq_start;
2643 ifp->if_qflush = igb_qflush;
2644#endif
2588 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2589 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2645 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2646 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2590 IFQ_SET_READY(&ifp->if_snd);
2591
2592 ether_ifattach(ifp, adapter->hw.mac.addr);
2593
2594 ifp->if_capabilities = ifp->if_capenable = 0;
2595
2596 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
2597 ifp->if_capabilities |= IFCAP_TSO4;
2598 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2647 IFQ_SET_READY(&ifp->if_snd);
2648
2649 ether_ifattach(ifp, adapter->hw.mac.addr);
2650
2651 ifp->if_capabilities = ifp->if_capenable = 0;
2652
2653 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
2654 ifp->if_capabilities |= IFCAP_TSO4;
2655 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2656 if (igb_header_split)
2657 ifp->if_capabilities |= IFCAP_LRO;
2658
2599 ifp->if_capenable = ifp->if_capabilities;
2600
2601 /*
2602 * Tell the upper layer(s) we support long frames.
2603 */
2604 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2605 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2606 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

--- 254 unchanged lines hidden (view full) ---

2861 struct adapter *adapter = txr->adapter;
2862 device_t dev = adapter->dev;
2863 struct igb_tx_buffer *txbuf;
2864 int error, i;
2865
2866 /*
2867 * Setup DMA descriptor areas.
2868 */
2659 ifp->if_capenable = ifp->if_capabilities;
2660
2661 /*
2662 * Tell the upper layer(s) we support long frames.
2663 */
2664 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2665 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2666 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

--- 254 unchanged lines hidden (view full) ---

2921 struct adapter *adapter = txr->adapter;
2922 device_t dev = adapter->dev;
2923 struct igb_tx_buffer *txbuf;
2924 int error, i;
2925
2926 /*
2927 * Setup DMA descriptor areas.
2928 */
2869	if ((error = bus_dma_tag_create(NULL,		/* parent */
2870			       1, 0,			/* alignment, bounds */
2871			       BUS_SPACE_MAXADDR,	/* lowaddr */
2872			       BUS_SPACE_MAXADDR,	/* highaddr */
2873			       NULL, NULL,		/* filter, filterarg */
2874			       IGB_TSO_SIZE,		/* maxsize */
2875			       IGB_MAX_SCATTER,		/* nsegments */
2876			       PAGE_SIZE,		/* maxsegsize */
2877			       0,			/* flags */
2878			       NULL,			/* lockfunc */
2879			       NULL,			/* lockfuncarg */
2880			       &txr->txtag))) {
2881		device_printf(dev,"Unable to allocate TX DMA tag\n");
2882		goto fail;
2883	}
2884

--- 87 unchanged lines hidden (view full) ---

2972static void
2973igb_initialize_transmit_units(struct adapter *adapter)
2974{
2975	struct tx_ring *txr = adapter->tx_rings;
2976	u32 tctl, txdctl;
2977
2978	INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
2979
2929	if ((error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),
2930			       1, 0,			/* alignment, bounds */
2931			       BUS_SPACE_MAXADDR,	/* lowaddr */
2932			       BUS_SPACE_MAXADDR,	/* highaddr */
2933			       NULL, NULL,		/* filter, filterarg */
2934			       IGB_TSO_SIZE,		/* maxsize */
2935			       IGB_MAX_SCATTER,		/* nsegments */
2936			       IGB_TSO_SEG_SIZE,	/* maxsegsize */
2937			       0,			/* flags */
2938			       NULL,			/* lockfunc */
2939			       NULL,			/* lockfuncarg */
2940			       &txr->txtag))) {
2941		device_printf(dev,"Unable to allocate TX DMA tag\n");
2942		goto fail;
2943	}
2944

--- 87 unchanged lines hidden (view full) ---

3032static void
3033igb_initialize_transmit_units(struct adapter *adapter)
3034{
3035	struct tx_ring *txr = adapter->tx_rings;
3036	u32 tctl, txdctl;
3037
3038	INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
3039
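For context on the tag parameters above: IGB_TSO_SIZE bounds a whole TSO payload, IGB_MAX_SCATTER bounds its scatter/gather list, and the maxsegsize change (PAGE_SIZE to IGB_TSO_SEG_SIZE) bounds each individual segment. A hedged sketch of how such a tag is then used per tx_buffer, with real busdma(9) calls but a hypothetical helper name:

static int
example_load_tx_chain(bus_dma_tag_t txtag, bus_dmamap_t map, struct mbuf *m)
{
	bus_dma_segment_t segs[IGB_MAX_SCATTER];
	int error, nsegs;

	/* Map the mbuf chain into at most IGB_MAX_SCATTER segments */
	error = bus_dmamap_load_mbuf_sg(txtag, map, m, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	/* ...one TX descriptor would be written per segs[0..nsegs-1]... */

	bus_dmamap_sync(txtag, map, BUS_DMASYNC_PREWRITE);
	return (0);
}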
2980	/* Setup the Base and Length of the Tx Descriptor Rings */
2981	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2982		u64 bus_addr = txr->txdma.dma_paddr;
2983
2984		E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(i),
2985		    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2986		E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(i),
2987		    (uint32_t)(bus_addr >> 32));
2988		E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(i),
2989		    (uint32_t)bus_addr);
2990
2991		/* Setup the HW Tx Head and Tail descriptor pointers */
2992		E1000_WRITE_REG(&adapter->hw, E1000_TDT(i), 0);
2993		E1000_WRITE_REG(&adapter->hw, E1000_TDH(i), 0);
2994
2995		HW_DEBUGOUT2("Base = %x, Length = %x\n",
2996		    E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
2997		    E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
2998
2999		/* Setup Transmit Descriptor Base Settings */
3000		adapter->txd_cmd = E1000_TXD_CMD_IFCS;
3001
3002		txdctl = E1000_READ_REG(&adapter->hw, E1000_TXDCTL(i));
3003		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3004		E1000_WRITE_REG(&adapter->hw, E1000_TXDCTL(i), txdctl);
3005	}
3006
3007	/* Program the Transmit Control Register */
3008	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3009	tctl &= ~E1000_TCTL_CT;
3010	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3011	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3012

--- 76 unchanged lines hidden (view full) ---

3089		txr->txtag = NULL;
3090	}
3091	return;
3092}
3093
3094/**********************************************************************
3095 *
3096 *  Setup work for hardware segmentation offload (TSO) on
3097 *  adapters using advanced tx descriptors (82575)
3040	/* Setup Transmit Descriptor Base Settings */
3041	adapter->txd_cmd = E1000_TXD_CMD_IFCS;
3042
3043	/* Setup the Tx Descriptor Rings */
3044	for (int i = 0; i < adapter->num_queues; i++, txr++) {
3045		u64 bus_addr = txr->txdma.dma_paddr;
3046
3047		E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(i),
3048		    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
3049		E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(i),
3050		    (uint32_t)(bus_addr >> 32));
3051		E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(i),
3052		    (uint32_t)bus_addr);
3053
3054		/* Setup the HW Tx Head and Tail descriptor pointers */
3055		E1000_WRITE_REG(&adapter->hw, E1000_TDT(i), 0);
3056		E1000_WRITE_REG(&adapter->hw, E1000_TDH(i), 0);
3057
3058		HW_DEBUGOUT2("Base = %x, Length = %x\n",
3059		    E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
3060		    E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
3061
3062		txr->watchdog_check = FALSE;
3063
3064		txdctl = E1000_READ_REG(&adapter->hw, E1000_TXDCTL(i));
3065		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3066		E1000_WRITE_REG(&adapter->hw, E1000_TXDCTL(i), txdctl);
3067
3068		/* Default interrupt rate */
3069		E1000_WRITE_REG(&adapter->hw, E1000_EITR(txr->msix),
3070		    igb_ave_latency);
3071	}
3072
3073	/* Program the Transmit Control Register */
3074	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3075	tctl &= ~E1000_TCTL_CT;
3076	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3077	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3078

--- 76 unchanged lines hidden (view full) ---

3155		txr->txtag = NULL;
3156	}
3157	return;
3158}
3159
3160/**********************************************************************
3161 *
3162 *  Setup work for hardware segmentation offload (TSO) on
3163 *  adapters using advanced tx descriptors
3098 *
3099 **********************************************************************/
3100static boolean_t
3101igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
3102{
3103 struct adapter *adapter = txr->adapter;
3104 struct e1000_adv_tx_context_desc *TXD;
3105 struct igb_tx_buffer *tx_buffer;

--- 54 unchanged lines hidden (view full) ---

3160 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
3161 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
3162 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
3163 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3164
3165 /* MSS L4LEN IDX */
3166 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
3167 mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
3164 *
3165 **********************************************************************/
3166static boolean_t
3167igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
3168{
3169 struct adapter *adapter = txr->adapter;
3170 struct e1000_adv_tx_context_desc *TXD;
3171 struct igb_tx_buffer *tx_buffer;

--- 54 unchanged lines hidden (view full) ---

3226 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
3227 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
3228 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
3229 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3230
3231 /* MSS L4LEN IDX */
3232 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
3233 mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
3234 /* 82575 needs the queue index added */
3235 if (adapter->hw.mac.type == e1000_82575)
3236 mss_l4len_idx |= txr->me << 4;
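Worked bit-packing example for the field written just below, assuming the standard advanced-TX context layout (MSS in bits 31:16 via E1000_ADVTXD_MSS_SHIFT, L4LEN in bits 15:8 via E1000_ADVTXD_L4LEN_SHIFT, context index in bits 7:4 as the << 4 above shows): for a 1448-byte MSS, a 20-byte TCP header and 82575 queue 1,

/*
 *   mss_l4len_idx = (1448 << 16) | (20 << 8) | (1 << 4)
 *                 = 0x05A80000  |  0x1400   |  0x10
 *                 = 0x05A81410
 */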
3168 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3169
3170 TXD->seqnum_seed = htole32(0);
3171 tx_buffer->m_head = NULL;
3172 tx_buffer->next_eop = -1;
3173
3174 if (++ctxd == adapter->num_tx_desc)
3175 ctxd = 0;

--- 11 unchanged lines hidden (view full) ---

3187 **********************************************************************/
3188
3189static bool
3190igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
3191{
3192 struct adapter *adapter = txr->adapter;
3193 struct e1000_adv_tx_context_desc *TXD;
3194 struct igb_tx_buffer *tx_buffer;
3237 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3238
3239 TXD->seqnum_seed = htole32(0);
3240 tx_buffer->m_head = NULL;
3241 tx_buffer->next_eop = -1;
3242
3243 if (++ctxd == adapter->num_tx_desc)
3244 ctxd = 0;

--- 11 unchanged lines hidden (view full) ---

3256 **********************************************************************/
3257
3258static bool
3259igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
3260{
3261 struct adapter *adapter = txr->adapter;
3262 struct e1000_adv_tx_context_desc *TXD;
3263 struct igb_tx_buffer *tx_buffer;
3195 uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3264 u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
3196 struct ether_vlan_header *eh;
3197 struct ip *ip = NULL;
3198 struct ip6_hdr *ip6;
3199 int ehdrlen, ctxd, ip_hlen = 0;
3200 u16 etype, vtag = 0;
3201 u8 ipproto = 0;
3202 bool offload = TRUE;
3203
3204 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3205 offload = FALSE;
3206
3265 struct ether_vlan_header *eh;
3266 struct ip *ip = NULL;
3267 struct ip6_hdr *ip6;
3268 int ehdrlen, ctxd, ip_hlen = 0;
3269 u16 etype, vtag = 0;
3270 u8 ipproto = 0;
3271 bool offload = TRUE;
3272
3273 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3274 offload = FALSE;
3275
3276 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
3207 ctxd = txr->next_avail_desc;
3208 tx_buffer = &txr->tx_buffers[ctxd];
3209 TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
3210
3211 /*
3212 ** In advanced descriptors the vlan tag must
3213 ** be placed into the context descriptor, thus
3214 ** we need to be here just for that setup.

--- 63 unchanged lines hidden (view full) ---

3278 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3279 break;
3280#endif
3281 default:
3282 offload = FALSE;
3283 break;
3284 }
3285
3277 ctxd = txr->next_avail_desc;
3278 tx_buffer = &txr->tx_buffers[ctxd];
3279 TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
3280
3281 /*
3282 ** In advanced descriptors the vlan tag must
3283 ** be placed into the context descriptor, thus
3284 ** we need to be here just for that setup.

--- 63 unchanged lines hidden (view full) ---

3348 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3349 break;
3350#endif
3351 default:
3352 offload = FALSE;
3353 break;
3354 }
3355
3356 /* 82575 needs the queue index added */
3357 if (adapter->hw.mac.type == e1000_82575)
3358 mss_l4len_idx = txr->me << 4;
3359
3286 /* Now copy bits into descriptor */
3287 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3288 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3289 TXD->seqnum_seed = htole32(0);
3360 /* Now copy bits into descriptor */
3361 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3362 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3363 TXD->seqnum_seed = htole32(0);
3290 TXD->mss_l4len_idx = htole32(0);
3364 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3291
3292 tx_buffer->m_head = NULL;
3293 tx_buffer->next_eop = -1;
3294
3295 /* We've consumed the first desc, adjust counters */
3296 if (++ctxd == adapter->num_tx_desc)
3297 ctxd = 0;
3298 txr->next_avail_desc = ctxd;

--- 10 unchanged lines hidden (view full) ---

3309 * tx_buffer is put back on the free queue.
3310 *
3311 * TRUE return means there's work in the ring to clean, FALSE it's empty.
3312 **********************************************************************/
3313static bool
3314igb_txeof(struct tx_ring *txr)
3315{
3316 struct adapter *adapter = txr->adapter;
3365
3366 tx_buffer->m_head = NULL;
3367 tx_buffer->next_eop = -1;
3368
3369 /* We've consumed the first desc, adjust counters */
3370 if (++ctxd == adapter->num_tx_desc)
3371 ctxd = 0;
3372 txr->next_avail_desc = ctxd;

--- 10 unchanged lines hidden (view full) ---

3383 * tx_buffer is put back on the free queue.
3384 *
3385 * TRUE return means there's work in the ring to clean, FALSE it's empty.
3386 **********************************************************************/
3387static bool
3388igb_txeof(struct tx_ring *txr)
3389{
3390 struct adapter *adapter = txr->adapter;
3317 int first, last, done, num_avail;
3318 u32 cleaned = 0;
3391 int first, last, done;
3319 struct igb_tx_buffer *tx_buffer;
3320 struct e1000_tx_desc *tx_desc, *eop_desc;
3321 struct ifnet *ifp = adapter->ifp;
3322
3323 IGB_TX_LOCK_ASSERT(txr);
3324
3325 if (txr->tx_avail == adapter->num_tx_desc)
3326 return FALSE;
3327
3392 struct igb_tx_buffer *tx_buffer;
3393 struct e1000_tx_desc *tx_desc, *eop_desc;
3394 struct ifnet *ifp = adapter->ifp;
3395
3396 IGB_TX_LOCK_ASSERT(txr);
3397
3398 if (txr->tx_avail == adapter->num_tx_desc)
3399 return FALSE;
3400
3328 num_avail = txr->tx_avail;
3329 first = txr->next_to_clean;
3330 tx_desc = &txr->tx_base[first];
3331 tx_buffer = &txr->tx_buffers[first];
3332 last = tx_buffer->next_eop;
3333 eop_desc = &txr->tx_base[last];
3334
3335 /*
3336 * What this does is get the index of the
3337 * first descriptor AFTER the EOP of the
3338 * first packet, that way we can do the
3339 * simple comparison on the inner while loop.
3340 */
3341 if (++last == adapter->num_tx_desc)
3342 last = 0;
3343 done = last;
3344
3345 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3401 first = txr->next_to_clean;
3402 tx_desc = &txr->tx_base[first];
3403 tx_buffer = &txr->tx_buffers[first];
3404 last = tx_buffer->next_eop;
3405 eop_desc = &txr->tx_base[last];
3406
3407 /*
3408 * What this does is get the index of the
3409 * first descriptor AFTER the EOP of the
3410 * first packet, that way we can do the
3411 * simple comparison on the inner while loop.
3412 */
3413 if (++last == adapter->num_tx_desc)
3414 last = 0;
3415 done = last;
3416
3417 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3346 BUS_DMASYNC_POSTREAD);
3418 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3347
3348 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3349 /* We clean the range of the packet */
3350 while (first != done) {
3351 tx_desc->upper.data = 0;
3352 tx_desc->lower.data = 0;
3353 tx_desc->buffer_addr = 0;
3419
3420 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3421 /* We clean the range of the packet */
3422 while (first != done) {
3423 tx_desc->upper.data = 0;
3424 tx_desc->lower.data = 0;
3425 tx_desc->buffer_addr = 0;
3354 ++num_avail; ++cleaned;
3426 ++txr->tx_avail;
3355
3356 if (tx_buffer->m_head) {
3357 ifp->if_opackets++;
3358 bus_dmamap_sync(txr->txtag,
3359 tx_buffer->map,
3360 BUS_DMASYNC_POSTWRITE);
3361 bus_dmamap_unload(txr->txtag,
3362 tx_buffer->map);
3363
3364 m_freem(tx_buffer->m_head);
3365 tx_buffer->m_head = NULL;
3366 }
3367 tx_buffer->next_eop = -1;
3427
3428 if (tx_buffer->m_head) {
3429 ifp->if_opackets++;
3430 bus_dmamap_sync(txr->txtag,
3431 tx_buffer->map,
3432 BUS_DMASYNC_POSTWRITE);
3433 bus_dmamap_unload(txr->txtag,
3434 tx_buffer->map);
3435
3436 m_freem(tx_buffer->m_head);
3437 tx_buffer->m_head = NULL;
3438 }
3439 tx_buffer->next_eop = -1;
3440 txr->watchdog_time = ticks;
3368
3369 if (++first == adapter->num_tx_desc)
3370 first = 0;
3371
3372 tx_buffer = &txr->tx_buffers[first];
3373 tx_desc = &txr->tx_base[first];
3374 }
3375 /* See if we can continue to the next packet */

--- 7 unchanged lines hidden (view full) ---

3383 break;
3384 }
3385 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3387
3388 txr->next_to_clean = first;
3389
3390 /*
3441
3442 if (++first == adapter->num_tx_desc)
3443 first = 0;
3444
3445 tx_buffer = &txr->tx_buffers[first];
3446 tx_desc = &txr->tx_base[first];
3447 }
3448 /* See if we can continue to the next packet */

--- 7 unchanged lines hidden (view full) ---

3456 break;
3457 }
3458 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3459 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3460
3461 txr->next_to_clean = first;
3462
3463 /*
3391 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
3392 * that it is OK to send packets.
3393 * If there are no pending descriptors, clear the timeout. Otherwise,
3394 * if some descriptors have been freed, restart the timeout.
3464 * If we have enough room, clear IFF_DRV_OACTIVE
3465 * to tell the stack that it is OK to send packets.
3395 */
3466 */
3396 if (num_avail > IGB_TX_CLEANUP_THRESHOLD) {
3467 if (txr->tx_avail > IGB_TX_CLEANUP_THRESHOLD) {
3397 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3468 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3398 /* All clean, turn off the timer */
3399 if (num_avail == adapter->num_tx_desc) {
3400 txr->watchdog_timer = 0;
3401 txr->tx_avail = num_avail;
3469 /* All clean, turn off the watchdog */
3470 if (txr->tx_avail == adapter->num_tx_desc) {
3471 txr->watchdog_check = FALSE;
3402 return FALSE;
3403 }
3404 }
3405
3472 return FALSE;
3473 }
3474 }
3475
3406 /* Some cleaned, reset the timer */
3407 if (cleaned)
3408 txr->watchdog_timer = IGB_TX_TIMEOUT;
3409 txr->tx_avail = num_avail;
3410 return TRUE;
3411}
3412
3413
3414/*********************************************************************
3415 *
3476 return TRUE;
3477}
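An index-arithmetic example for the cleanup loop above, under an assumed (hypothetical) 1024-descriptor ring:

/*
 * first (next_to_clean) = 1020, the packet's EOP sits in descriptor 2:
 * "last" advances past the EOP and wraps to 3 ("done"), so the inner
 * while frees descriptors 1020, 1021, 1022, 1023, 0, 1 and 2, wrapping
 * via the ++first == num_tx_desc test, and stops when first == done.
 * The DD (descriptor done) bit on each EOP descriptor gates every pass.
 */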
3478
3479
3480/*********************************************************************
3481 *
3416 *  Setup descriptor buffer(s) from system mbuf buffer pools.
3417 *	i - designates the ring index
3418 *	clean - tells the function whether to update
3419 *		the header, the packet buffer, or both.
3420 *
3421 **********************************************************************/
3422static int
3423igb_get_buf(struct rx_ring *rxr, int i, u8 clean)
3424{
3425	struct adapter *adapter = rxr->adapter;
3426	struct mbuf *mh, *mp;
3427	bus_dma_segment_t seg[2];
3428	bus_dmamap_t map;
3429	struct igb_rx_buffer *rx_buffer;
3430	int error, nsegs;
3431	int merr = 0;
3432
3433
3434	rx_buffer = &rxr->rx_buffers[i];
3435
3436	/* First get our header and payload mbuf */
3437	if (clean & IGB_CLEAN_HEADER) {
3438		mh = m_gethdr(M_DONTWAIT, MT_DATA);
3439		if (mh == NULL)
3440			goto remap;
3441	} else	/* reuse */
3442		mh = rxr->rx_buffers[i].m_head;
3443
3444	mh->m_len = MHLEN;
3445	mh->m_flags |= M_PKTHDR;
3446
3447	if (clean & IGB_CLEAN_PAYLOAD) {
3448		mp = m_getjcl(M_DONTWAIT, MT_DATA,
3449		    M_PKTHDR, adapter->rx_mbuf_sz);
3450		if (mp == NULL)
3451			goto remap;
3452		mp->m_len = adapter->rx_mbuf_sz;
3453		mp->m_flags &= ~M_PKTHDR;
3454	} else {	/* reusing */
3455		mp = rxr->rx_buffers[i].m_pack;
3456		mp->m_len = adapter->rx_mbuf_sz;
3457		mp->m_flags &= ~M_PKTHDR;
3458	}
3459	/*
3460	** Need to create a chain for the following
3461	** dmamap call at this point.
3462	*/
3463	mh->m_next = mp;
3464	mh->m_pkthdr.len = mh->m_len + mp->m_len;
3465
3466	/* Get the memory mapping */
3467	error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3468	    rxr->rx_spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3469	if (error != 0) {
3470		printf("GET BUF: dmamap load failure - %d\n", error);
3471		m_free(mh);
3472		return (error);
3473	}
3474
3475	/* Unload old mapping and update buffer struct */
3476	if (rx_buffer->m_head != NULL)
3477		bus_dmamap_unload(rxr->rxtag, rx_buffer->map);
3478	map = rx_buffer->map;
3479	rx_buffer->map = rxr->rx_spare_map;
3480	rxr->rx_spare_map = map;
3481	rx_buffer->m_head = mh;
3482	rx_buffer->m_pack = mp;
3483	bus_dmamap_sync(rxr->rxtag,
3484	    rx_buffer->map, BUS_DMASYNC_PREREAD);
3485
3486	/* Update descriptor */
3487	rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3488	rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3489
3490	return (0);
3491
3492	/*
3493	** If we get here, we have an mbuf resource
3494	** issue, so we discard the incoming packet
3495	** and attempt to reuse existing mbufs next
3496	** pass thru the ring, but to do so we must
3497	** fix up the descriptor which had the address
3498	** clobbered with writeback info.
3499	*/
3500remap:
3501	adapter->mbuf_header_failed++;
3502	merr = ENOBUFS;
3503	/* Is there a reusable buffer? */
3504	mh = rxr->rx_buffers[i].m_head;
3505	if (mh == NULL) /* Nope, init error */
3506		return (merr);
3507	mp = rxr->rx_buffers[i].m_pack;
3508	if (mp == NULL) /* Nope, init error */
3509		return (merr);
3510	/* Get our old mapping */
3511	rx_buffer = &rxr->rx_buffers[i];
3512	error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3513	    rx_buffer->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3514	if (error != 0) {
3515		/* We really have a problem */
3516		m_free(mh);
3517		return (error);
3518	}
3519	/* Now fix the descriptor as needed */
3520	rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3521	rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3522	return (merr);
3523}
3524
3482 *  Refresh mbuf buffers for a range of descriptors
3483 *
3484 **********************************************************************/
3485static int
3486igb_get_buf(struct rx_ring *rxr, int first, int limit)
3487{
3488	struct adapter *adapter = rxr->adapter;
3489	bus_dma_segment_t seg[2];
3490	struct igb_rx_buf *rxbuf;
3491	struct mbuf *mh, *mp;
3492	bus_dmamap_t map;
3493	int i, nsegs, error;
3494
3495	i = first;
3496	while (i != limit) {
3497		rxbuf = &rxr->rx_buffers[i];
3498
3499		if (rxbuf->m_head == NULL) {
3500			mh = m_gethdr(M_DONTWAIT, MT_DATA);
3501			if (mh == NULL)
3502				goto failure;
3503		} else	/* reuse */
3504			mh = rxbuf->m_head;
3505
3506		mh->m_len = MHLEN;
3507		mh->m_flags |= M_PKTHDR;
3508
3509		if (rxbuf->m_pack == NULL) {
3510			mp = m_getjcl(M_DONTWAIT, MT_DATA,
3511			    M_PKTHDR, adapter->rx_mbuf_sz);
3512			if (mp == NULL)
3513				goto failure;
3514			mp->m_len = adapter->rx_mbuf_sz;
3515			mp->m_flags &= ~M_PKTHDR;
3516		} else {	/* reusing */
3517			mp = rxbuf->m_pack;
3518			mp->m_len = adapter->rx_mbuf_sz;
3519			mp->m_flags &= ~M_PKTHDR;
3520		}
3521
3522		/*
3523		** Need to create a chain for the following
3524		** dmamap call at this point.
3525		*/
3526		mh->m_next = mp;
3527		mh->m_pkthdr.len = mh->m_len + mp->m_len;
3528
3529		/* Get the memory mapping */
3530		error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3531		    rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3532		if (error != 0)
3533			panic("igb_get_buf: dmamap load failure\n");
3534
3535		/* Unload old mapping and update buffer struct */
3536		if (rxbuf->m_head != NULL)
3537			bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3538		map = rxbuf->map;
3539		rxbuf->map = rxr->spare_map;
3540		rxr->spare_map = map;
3541		rxbuf->m_head = mh;
3542		rxbuf->m_pack = mp;
3543		bus_dmamap_sync(rxr->rxtag,
3544		    rxbuf->map, BUS_DMASYNC_PREREAD);
3545
3546		/* Update descriptor */
3547		rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3548		rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3549
3550		/* Calculate next index */
3551		if (++i == adapter->num_rx_desc)
3552			i = 0;
3553	}
3554
3555	return (0);
3556
3557failure:
3558	/*
3559	** It's unfortunate to have to panic, but
3560	** with the new design I see no other
3561	** graceful failure mode, this is ONLY
3562	** called in the RX clean path, and the
3563	** old mbuf has been used, it MUST be
3564	** refreshed. This should be avoided by
3565	** proper configuration. -jfv
3566	*/
3567	panic("igb_get_buf: ENOBUFS\n");
3568}
3569
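Because igb_get_buf() always loads an MHLEN header mbuf chained to a single payload cluster, the two-entry seg[] array above maps exactly onto the advanced descriptor's split-read layout. A minimal sketch of that final step (illustrative helper, not driver code):

static void
example_write_rx_desc(union e1000_adv_rx_desc *rxd,
    const bus_dma_segment_t seg[2])
{
	rxd->read.hdr_addr = htole64(seg[0].ds_addr);	/* header buffer */
	rxd->read.pkt_addr = htole64(seg[1].ds_addr);	/* payload buffer */
}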
3525
3526/*********************************************************************
3527 *
3528 * Allocate memory for rx_buffer structures. Since we use one
3529 * rx_buffer per received packet, the maximum number of rx_buffer's
3530 * that we'll need is equal to the number of receive descriptors
3531 * that we've allocated.
3532 *
3533 **********************************************************************/
3534static int
3535igb_allocate_receive_buffers(struct rx_ring *rxr)
3536{
3537 struct adapter *adapter = rxr->adapter;
3538 device_t dev = adapter->dev;
3570/*********************************************************************
3571 *
3572 * Allocate memory for rx_buffer structures. Since we use one
3573 * rx_buffer per received packet, the maximum number of rx_buffer's
3574 * that we'll need is equal to the number of receive descriptors
3575 * that we've allocated.
3576 *
3577 **********************************************************************/
3578static int
3579igb_allocate_receive_buffers(struct rx_ring *rxr)
3580{
3581 struct adapter *adapter = rxr->adapter;
3582 device_t dev = adapter->dev;
3539	struct igb_rx_buffer	*rxbuf;
3540	int			i, bsize, error;
3541
3542	bsize = sizeof(struct igb_rx_buffer) * adapter->num_rx_desc;
3543	if (!(rxr->rx_buffers =
3544	    (struct igb_rx_buffer *) malloc(bsize,
3545	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
3546		device_printf(dev, "Unable to allocate rx_buffer memory\n");
3547		error = ENOMEM;
3548		goto fail;
3549	}
3550
3551	/*
3552	** The tag is made to accommodate the largest buffer size
3553	** with packet split (hence the two segments), even though
3554	** it may not always use this.
3555	*/
3556	if ((error = bus_dma_tag_create(NULL,		/* parent */
3557			   1, 0,		/* alignment, bounds */
3558			   BUS_SPACE_MAXADDR,	/* lowaddr */
3559			   BUS_SPACE_MAXADDR,	/* highaddr */
3560			   NULL, NULL,		/* filter, filterarg */
3561			   MJUM16BYTES,		/* maxsize */
3562			   2,			/* nsegments */
3563			   MJUMPAGESIZE,	/* maxsegsize */
3564			   0,			/* flags */
3565			   NULL,		/* lockfunc */
3566			   NULL,		/* lockfuncarg */
3567			   &rxr->rxtag))) {
3568		device_printf(dev, "Unable to create RX DMA tag\n");
3569		goto fail;
3570	}
3571
3572	/* Create the spare map (used by getbuf) */
3573	error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3574	    &rxr->rx_spare_map);
3583	struct igb_rx_buf	*rxbuf;
3584	int			i, bsize, error;
3585
3586	bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
3587	if (!(rxr->rx_buffers =
3588	    (struct igb_rx_buf *) malloc(bsize,
3589	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
3590		device_printf(dev, "Unable to allocate rx_buffer memory\n");
3591		error = ENOMEM;
3592		goto fail;
3593	}
3594
3595	/*
3596	** The tag is made to accommodate the largest buffer size
3597	** with packet split (hence the two segments), even though
3598	** it may not always use this.
3599	*/
3600	if ((error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),
3601			   1, 0,		/* alignment, bounds */
3602			   BUS_SPACE_MAXADDR,	/* lowaddr */
3603			   BUS_SPACE_MAXADDR,	/* highaddr */
3604			   NULL, NULL,		/* filter, filterarg */
3605			   MJUM16BYTES,		/* maxsize */
3606			   2,			/* nsegments */
3607			   MJUMPAGESIZE,	/* maxsegsize */
3608			   0,			/* flags */
3609			   NULL,		/* lockfunc */
3610			   NULL,		/* lockfuncarg */
3611			   &rxr->rxtag))) {
3612		device_printf(dev, "Unable to create RX DMA tag\n");
3613		goto fail;
3614	}
3615
3616	/* Create the spare map (used by getbuf) */
3617	error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3618	    &rxr->spare_map);
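The spare map created above enables a load-then-swap pattern: a replacement mbuf chain is loaded into the spare first, so if the load fails the descriptor's current mapping is still intact. A sketch of the idiom using the new revision's field names (helper name hypothetical):

static int
example_refresh_with_spare(struct rx_ring *rxr, struct igb_rx_buf *rxbuf,
    struct mbuf *mh)
{
	bus_dma_segment_t seg[2];
	bus_dmamap_t tmp;
	int error, nsegs;

	/* Load the new chain into the spare; live map is untouched on error */
	error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxr->spare_map,
	    mh, seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	bus_dmamap_unload(rxr->rxtag, rxbuf->map);
	tmp = rxbuf->map;		/* retired map becomes the new spare */
	rxbuf->map = rxr->spare_map;
	rxr->spare_map = tmp;
	return (0);
}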
3575 if (error) {
3576 device_printf(dev,
3577 "%s: bus_dmamap_create header spare failed: %d\n",
3578 __func__, error);
3579 goto fail;
3580 }
3581
3582 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {

--- 20 unchanged lines hidden (view full) ---

3603 *
3604 **********************************************************************/
3605static int
3606igb_setup_receive_ring(struct rx_ring *rxr)
3607{
3608 struct adapter *adapter;
3609 struct ifnet *ifp;
3610 device_t dev;
3619 if (error) {
3620 device_printf(dev,
3621 "%s: bus_dmamap_create header spare failed: %d\n",
3622 __func__, error);
3623 goto fail;
3624 }
3625
3626 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {

--- 20 unchanged lines hidden (view full) ---

3647 *
3648 **********************************************************************/
3649static int
3650igb_setup_receive_ring(struct rx_ring *rxr)
3651{
3652 struct adapter *adapter;
3653 struct ifnet *ifp;
3654 device_t dev;
3611 struct igb_rx_buffer *rxbuf;
3655 struct igb_rx_buf *rxbuf;
3656 bus_dma_segment_t seg[2];
3612 struct lro_ctrl *lro = &rxr->lro;
3657 struct lro_ctrl *lro = &rxr->lro;
3613 int j, rsize;
3658 int rsize, nsegs, error = 0;
3614
3615 adapter = rxr->adapter;
3616 dev = adapter->dev;
3617 ifp = adapter->ifp;
3659
3660 adapter = rxr->adapter;
3661 dev = adapter->dev;
3662 ifp = adapter->ifp;
3618 rxr->lro_enabled = FALSE;
3619 rxr->hdr_split = FALSE;
3620
3621 /* Clear the ring contents */
3622 rsize = roundup2(adapter->num_rx_desc *
3623 sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
3624 bzero((void *)rxr->rx_base, rsize);
3625
3626 /*
3627 ** Free current RX buffer structures and their mbufs

--- 6 unchanged lines hidden (view full) ---

3634 if (rxbuf->m_head) {
3635 rxbuf->m_head->m_next = rxbuf->m_pack;
3636 m_freem(rxbuf->m_head);
3637 }
3638 rxbuf->m_head = NULL;
3639 rxbuf->m_pack = NULL;
3640 }
3641
3663
3664 /* Clear the ring contents */
3665 rsize = roundup2(adapter->num_rx_desc *
3666 sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
3667 bzero((void *)rxr->rx_base, rsize);
3668
3669 /*
3670 ** Free current RX buffer structures and their mbufs

--- 6 unchanged lines hidden (view full) ---

3677 if (rxbuf->m_head) {
3678 rxbuf->m_head->m_next = rxbuf->m_pack;
3679 m_freem(rxbuf->m_head);
3680 }
3681 rxbuf->m_head = NULL;
3682 rxbuf->m_pack = NULL;
3683 }
3684
3642	/* Next replenish the ring */
3643	for (j = 0; j < adapter->num_rx_desc; j++) {
3644		if (igb_get_buf(rxr, j, IGB_CLEAN_BOTH) == ENOBUFS) {
3645			rxr->rx_buffers[j].m_head = NULL;
3646			rxr->rx_buffers[j].m_pack = NULL;
3647			rxr->rx_base[j].read.hdr_addr = 0;
3648			rxr->rx_base[j].read.pkt_addr = 0;
3649			goto fail;
3650		}
3651	}
3652
3653	/* Setup our descriptor indices */
3654	rxr->next_to_check = 0;
3655	rxr->last_cleaned = 0;
3656
3657	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3658	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3659
3660	/*
3661	** Now set up the LRO interface, we
3662	** also only do head split when LRO
3663	** is enabled, since so often they
3664	** are undesirable in similar setups.
3665	*/
3666	if (ifp->if_capenable & IFCAP_LRO) {
3667		int err = tcp_lro_init(lro);
3668		if (err) {
3669			device_printf(dev,"LRO Initialization failed!\n");
3670			goto fail;
3671		}
3672		INIT_DEBUGOUT("RX LRO Initialized\n");
3673		rxr->lro_enabled = TRUE;
3674		rxr->hdr_split = TRUE;
3675		lro->ifp = adapter->ifp;
3676	}
3677
3678	return (0);
3679fail:
3680	/*
3681	 * We need to clean up any buffers allocated
3682	 * so far, 'j' is the failing index.
3683	 */
3684	for (int i = 0; i < j; i++) {
3685		rxbuf = &rxr->rx_buffers[i];
3686		if (rxbuf->m_head != NULL) {
3687			bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3688			    BUS_DMASYNC_POSTREAD);
3689			bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3690			m_freem(rxbuf->m_head);
3691			rxbuf->m_head = NULL;
3692		}
3693	}
3694	return (ENOBUFS);
3685	/* Now replenish the mbufs */
3686	for (int j = 0; j != adapter->num_rx_desc; ++j) {
3687
3688		rxbuf = &rxr->rx_buffers[j];
3689		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
3690		if (rxbuf->m_head == NULL)
3691			panic("RX ring hdr initialization failed!\n");
3692		rxbuf->m_head->m_len = MHLEN;
3693		rxbuf->m_head->m_flags |= M_PKTHDR;
3694		rxbuf->m_head->m_pkthdr.len = rxbuf->m_head->m_len;
3695
3696		rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
3697		    M_PKTHDR, adapter->rx_mbuf_sz);
3698		if (rxbuf->m_pack == NULL)
3699			panic("RX ring pkt initialization failed!\n");
3700		rxbuf->m_pack->m_len = adapter->rx_mbuf_sz;
3701		rxbuf->m_head->m_next = rxbuf->m_pack;
3702		rxbuf->m_head->m_pkthdr.len += rxbuf->m_pack->m_len;
3703
3704		/* Get the memory mapping */
3705		error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3706		    rxbuf->map, rxbuf->m_head, seg,
3707		    &nsegs, BUS_DMA_NOWAIT);
3708		if (error != 0)
3709			panic("RX ring dma initialization failed!\n");
3710		bus_dmamap_sync(rxr->rxtag,
3711		    rxbuf->map, BUS_DMASYNC_PREREAD);
3712
3713		/* Update descriptor */
3714		rxr->rx_base[j].read.hdr_addr = htole64(seg[0].ds_addr);
3715		rxr->rx_base[j].read.pkt_addr = htole64(seg[1].ds_addr);
3716	}
3717
3718	/* Setup our descriptor indices */
3719	rxr->next_to_check = 0;
3720	rxr->last_cleaned = 0;
3721	rxr->lro_enabled = FALSE;
3722
3723	if (igb_header_split)
3724		rxr->hdr_split = TRUE;
3725	else
3726		ifp->if_capabilities &= ~IFCAP_LRO;
3727
3728	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3729	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3730
3731	/*
3732	** Now set up the LRO interface, we
3733	** also only do head split when LRO
3734	** is enabled, since so often they
3735	** are undesirable in similar setups.
3736	*/
3737	if ((ifp->if_capenable & IFCAP_LRO) && (rxr->hdr_split)) {
3738		int err = tcp_lro_init(lro);
3739		if (err)
3740			panic("LRO Initialization failed!\n");
3741		INIT_DEBUGOUT("RX LRO Initialized\n");
3742		rxr->lro_enabled = TRUE;
3743		lro->ifp = adapter->ifp;
3744	}
3745
3746	return (0);
3747#if 0
3748fail:
3749	/*
3750	 * We need to clean up any buffers allocated
3751	 * so far, 'j' is the failing index.
3752	 */
3753	for (int i = 0; i < j; i++) {
3754		rxbuf = &rxr->rx_buffers[i];
3755		if (rxbuf->m_head != NULL) {
3756			bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3757			    BUS_DMASYNC_POSTREAD);
3758			bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3759			m_freem(rxbuf->m_head);
3760			rxbuf->m_head = NULL;
3761		}
3762	}
3763	return (ENOBUFS);
3764#endif
3695}
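Once tcp_lro_init() succeeds above, the RX path feeds frames through tcp_lro_rx() and periodically flushes the active list. A hedged sketch of that consumer side with the real tcp_lro(4) KPIs; the flush loop's placement is an assumption (it lives in a hidden portion of igb_rxeof), and the helper name is hypothetical:

static void
example_lro_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m)
{
	struct lro_ctrl *lro = &rxr->lro;
	struct lro_entry *queued;

	/* Hand to LRO when enabled; fall back to the stack otherwise */
	if (!rxr->lro_enabled || tcp_lro_rx(lro, m, 0) != 0)
		(*ifp->if_input)(ifp, m);

	/* Push any merged super-segments up the stack */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
}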
3696
3697/*********************************************************************
3698 *
3699 * Initialize all receive rings.
3700 *
3701 **********************************************************************/
3702static int

--- 12 unchanged lines hidden (view full) ---

3715 * Free RX buffers allocated so far, we will only handle
3716 * the rings that completed, the failing case will have
3717 * cleaned up for itself. The value of 'i' will be the
3718 * failed ring so we must pre-decrement it.
3719 */
3720 rxr = adapter->rx_rings;
3721 for (--i; i > 0; i--, rxr++) {
3722 for (j = 0; j < adapter->num_rx_desc; j++) {
3765}
3766
3767/*********************************************************************
3768 *
3769 * Initialize all receive rings.
3770 *
3771 **********************************************************************/
3772static int

--- 12 unchanged lines hidden (view full) ---

3785 * Free RX buffers allocated so far, we will only handle
3786 * the rings that completed, the failing case will have
3787 * cleaned up for itself. The value of 'i' will be the
3788 * failed ring so we must pre-decrement it.
3789 */
3790 rxr = adapter->rx_rings;
3791 for (--i; i > 0; i--, rxr++) {
3792 for (j = 0; j < adapter->num_rx_desc; j++) {
3723 struct igb_rx_buffer *rxbuf;
3793 struct igb_rx_buf *rxbuf;
3724 rxbuf = &rxr->rx_buffers[j];
3725 if (rxbuf->m_head != NULL) {
3726 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3727 BUS_DMASYNC_POSTREAD);
3728 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3729 m_freem(rxbuf->m_head);
3730 rxbuf->m_head = NULL;
3731 }

--- 69 unchanged lines hidden (view full) ---

3801 /* Enable this Queue */
3802 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(i));
3803 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3804 rxdctl &= 0xFFF00000;
3805 rxdctl |= IGB_RX_PTHRESH;
3806 rxdctl |= IGB_RX_HTHRESH << 8;
3807 rxdctl |= IGB_RX_WTHRESH << 16;
3808 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(i), rxdctl);
3794 rxbuf = &rxr->rx_buffers[j];
3795 if (rxbuf->m_head != NULL) {
3796 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3797 BUS_DMASYNC_POSTREAD);
3798 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3799 m_freem(rxbuf->m_head);
3800 rxbuf->m_head = NULL;
3801 }

--- 69 unchanged lines hidden (view full) ---

3871 /* Enable this Queue */
3872 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(i));
3873 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3874 rxdctl &= 0xFFF00000;
3875 rxdctl |= IGB_RX_PTHRESH;
3876 rxdctl |= IGB_RX_HTHRESH << 8;
3877 rxdctl |= IGB_RX_WTHRESH << 16;
3878 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(i), rxdctl);
3879
3880 /* Initial RX interrupt moderation */
3881 rxr->eitr_setting = igb_ave_latency;
3882 E1000_WRITE_REG(&adapter->hw,
3883 E1000_EITR(rxr->msix), igb_ave_latency);
3809 }
3810
3811 /*
3812 ** Setup for RX MultiQueue
3813 */
3814 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3815 if (adapter->num_queues >1) {
3816 u32 random[10], mrqc, shift = 0;

--- 105 unchanged lines hidden (view full) ---

3922/*********************************************************************
3923 *
3924 * Free receive ring data structures.
3925 *
3926 **********************************************************************/
3927static void
3928igb_free_receive_buffers(struct rx_ring *rxr)
3929{
3884 }
3885
3886 /*
3887 ** Setup for RX MultiQueue
3888 */
3889 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3890 if (adapter->num_queues >1) {
3891 u32 random[10], mrqc, shift = 0;

--- 105 unchanged lines hidden (view full) ---

3997/*********************************************************************
3998 *
3999 * Free receive ring data structures.
4000 *
4001 **********************************************************************/
4002static void
4003igb_free_receive_buffers(struct rx_ring *rxr)
4004{
3930 struct adapter *adapter = rxr->adapter;
3931 struct igb_rx_buffer *rx_buffer;
4005 struct adapter *adapter = rxr->adapter;
4006 struct igb_rx_buf *rx_buffer;
3932
3933 INIT_DEBUGOUT("free_receive_structures: begin");
3934
4007
4008 INIT_DEBUGOUT("free_receive_structures: begin");
4009
3935 if (rxr->rx_spare_map) {
3936 bus_dmamap_destroy(rxr->rxtag, rxr->rx_spare_map);
3937 rxr->rx_spare_map = NULL;
4010 if (rxr->spare_map) {
4011 bus_dmamap_destroy(rxr->rxtag, rxr->spare_map);
4012 rxr->spare_map = NULL;
3938 }
3939
3940 /* Cleanup any existing buffers */
3941 if (rxr->rx_buffers != NULL) {
3942 rx_buffer = &rxr->rx_buffers[0];
3943 for (int i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3944 if (rx_buffer->m_head != NULL) {
3945 bus_dmamap_sync(rxr->rxtag, rx_buffer->map,

--- 29 unchanged lines hidden (view full) ---

3975 * the mbufs in the descriptor and sends data which has been
3976 * dma'ed into host memory to upper layer.
3977 *
3978 * We loop at most count times if count is > 0, or until done if
3979 * count < 0.
3980 *
3981 * Return TRUE if more to clean, FALSE otherwise
3982 *********************************************************************/
4013 }
4014
4015 /* Cleanup any existing buffers */
4016 if (rxr->rx_buffers != NULL) {
4017 rx_buffer = &rxr->rx_buffers[0];
4018 for (int i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4019 if (rx_buffer->m_head != NULL) {
4020 bus_dmamap_sync(rxr->rxtag, rx_buffer->map,

--- 29 unchanged lines hidden (view full) ---

4050 * the mbufs in the descriptor and sends data which has been
4051 * dma'ed into host memory to upper layer.
4052 *
4053 * We loop at most count times if count is > 0, or until done if
4054 * count < 0.
4055 *
4056 * Return TRUE if more to clean, FALSE otherwise
4057 *********************************************************************/
4058
3983static bool
3984igb_rxeof(struct rx_ring *rxr, int count)
3985{
4059static bool
4060igb_rxeof(struct rx_ring *rxr, int count)
4061{
3986 struct adapter *adapter = rxr->adapter;
3987 struct ifnet *ifp;
4062 struct adapter *adapter = rxr->adapter;
4063 struct ifnet *ifp = adapter->ifp;
3988 struct lro_ctrl *lro = &rxr->lro;
3989 struct lro_entry *queued;
4064 struct lro_ctrl *lro = &rxr->lro;
4065 struct lro_entry *queued;
3990 int i;
4066 int i, processed = 0;
3991 u32 staterr;
3992 union e1000_adv_rx_desc *cur;
3993
3994
3995 IGB_RX_LOCK(rxr);
4067 u32 staterr;
4068 union e1000_adv_rx_desc *cur;
4069
4070
4071 IGB_RX_LOCK(rxr);
3996 ifp = adapter->ifp;
3997 i = rxr->next_to_check;
3998 cur = &rxr->rx_base[i];
3999 staterr = cur->wb.upper.status_error;
4000
4001 if (!(staterr & E1000_RXD_STAT_DD)) {
4002 IGB_RX_UNLOCK(rxr);
4003 return FALSE;
4004 }
4005
4006 /* Sync the ring */
4007 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4008 BUS_DMASYNC_POSTREAD);
4009
4072 i = rxr->next_to_check;
4073 cur = &rxr->rx_base[i];
4074 staterr = cur->wb.upper.status_error;
4075
4076 if (!(staterr & E1000_RXD_STAT_DD)) {
4077 IGB_RX_UNLOCK(rxr);
4078 return FALSE;
4079 }
4080
4081 /* Sync the ring */
4082 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4083 BUS_DMASYNC_POSTREAD);
4084
4010 /* Main clean loop */
4011 while ((staterr & E1000_RXD_STAT_DD) &&
4012 (count != 0) &&
4085 while ((staterr & E1000_RXD_STAT_DD) && (count != 0) &&
4013 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4086 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4014 struct mbuf *sendmp, *mh, *mp;
4015 u16 hlen, plen, hdr, ptype, len_adj, vtag;
4016 u8 dopayload, accept_frame, eop;
4017
4018 accept_frame = 1;
4019 hlen = plen = len_adj = vtag = 0;
4020 sendmp = mh = mp = NULL;
4021 ptype = (u16)(cur->wb.lower.lo_dword.data >> 4);
4087 struct mbuf *sendmp, *mh, *mp, *nh, *np;
4088 struct igb_rx_buf *nxtbuf;
4089 u32 ptype;
4090 u16 hlen, plen, hdr, nextp, vtag;
4091 bool accept_frame, eop, sctp = FALSE;
4022
4092
4093
4094 accept_frame = TRUE;
4095 hlen = plen = nextp = 0;
4096 sendmp = mh = mp = nh = np = NULL;
4097
4098 ptype = (le32toh(cur->wb.lower.lo_dword.data) &
4099 IGB_PKTTYPE_MASK);
4100 if (((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0) &&
4101 ((ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0))
4102 sctp = TRUE;
4103
4023 /* Sync the buffers */
4024 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
4025 BUS_DMASYNC_POSTREAD);
4104 /* Sync the buffers */
4105 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
4106 BUS_DMASYNC_POSTREAD);
4107 mh = rxr->rx_buffers[i].m_head;
4108 mp = rxr->rx_buffers[i].m_pack;
4109 vtag = le16toh(cur->wb.upper.vlan);
4110 eop = ((staterr & E1000_RXD_STAT_EOP) != 0);
4026
4111
4112 /* Get the next descriptor we will process */
4113 if (!eop) {
4114 nextp = i + 1;
4115 if (nextp == adapter->num_rx_desc)
4116 nextp = 0;
4117 nxtbuf = &rxr->rx_buffers[nextp];
4118 prefetch(nxtbuf);
4119 }
4120
4027 /*
4028 ** The way the hardware is configured to
4029 ** split, it will ONLY use the header buffer
4030 ** when header split is enabled, otherwise we
4121 /*
4122 ** The way the hardware is configured to
4123 ** split, it will ONLY use the header buffer
4124 ** when header split is enabled, otherwise we
4031 ** get normal behavior, ie, both header and
4032 ** payload are DMA'd into the payload buffer.
4125 ** get legacy behavior, ie, both header and
4126 ** payload are DMA'd into JUST the payload buffer.
4033 **
4127 **
4034 ** The fmp test is to catch the case where a
4035 ** packet spans multiple descriptors, in that
4036 ** case only the first header is valid.
4128 ** Rather than using the fmp/lmp global pointers
4129 ** we now keep the head of a packet chain in the
4130 ** m_nextpkt pointer and pass this along from one
4131 ** descriptor to the next, until we get EOP.
4132 **
4037 */
4133 */
4038 if ((rxr->hdr_split) && (rxr->fmp == NULL)){
4134 if ((rxr->hdr_split) && (mh->m_nextpkt == NULL)) {
4039 hdr = le16toh(cur->
4040 wb.lower.lo_dword.hs_rss.hdr_info);
4041 hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
4042 E1000_RXDADV_HDRBUFLEN_SHIFT;
4043 if (hlen > IGB_HDR_BUF)
4044 hlen = IGB_HDR_BUF;
4045 plen = le16toh(cur->wb.upper.length);
4135 hdr = le16toh(cur->
4136 wb.lower.lo_dword.hs_rss.hdr_info);
4137 hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
4138 E1000_RXDADV_HDRBUFLEN_SHIFT;
4139 if (hlen > IGB_HDR_BUF)
4140 hlen = IGB_HDR_BUF;
4141 plen = le16toh(cur->wb.upper.length);
4046 /* Handle the header mbuf */
4047 mh = rxr->rx_buffers[i].m_head;
4048 mh->m_len = hlen;
4142 mh->m_len = hlen;
4049 dopayload = IGB_CLEAN_HEADER;
4143 mh->m_flags |= M_PKTHDR;
4144 mh->m_next = NULL;
4145 mh->m_pkthdr.len = mh->m_len;
4146 /* Null this so getbuf replenishes */
4147 rxr->rx_buffers[i].m_head = NULL;
4050 /*
4051 ** Get the payload length, this
4052 ** could be zero if its a small
4053 ** packet.
4054 */
4055 if (plen) {
4148 /*
4149 ** Get the payload length, this
4150 ** could be zero if its a small
4151 ** packet.
4152 */
4153 if (plen) {
4056 mp = rxr->rx_buffers[i].m_pack;
4057 mp->m_len = plen;
4058 mp->m_next = NULL;
4059 mp->m_flags &= ~M_PKTHDR;
4060 mh->m_next = mp;
4154 mp->m_len = plen;
4155 mp->m_next = NULL;
4156 mp->m_flags &= ~M_PKTHDR;
4157 mh->m_next = mp;
4061 mh->m_flags |= M_PKTHDR;
4062 dopayload = IGB_CLEAN_BOTH;
4158 mh->m_pkthdr.len += mp->m_len;
4159 /* Null this so getbuf replenishes */
4160 rxr->rx_buffers[i].m_pack = NULL;
4063 rxr->rx_split_packets++;
4161 rxr->rx_split_packets++;
4064 } else { /* small packets */
4065 mh->m_flags &= ~M_PKTHDR;
4066 mh->m_next = NULL;
4067 }
4162 }
4163 /* Setup the forward chain */
4164 if (eop == 0) {
4165 nh = rxr->rx_buffers[nextp].m_head;
4166 np = rxr->rx_buffers[nextp].m_pack;
4167 nh->m_nextpkt = mh;
4168 if (plen)
4169 mp->m_next = np;
4170 else
4171 mh->m_next = np;
4172 } else {
4173 sendmp = mh;
4174 if (staterr & E1000_RXD_STAT_VP) {
4175 sendmp->m_pkthdr.ether_vtag = vtag;
4176 sendmp->m_flags |= M_VLANTAG;
4177 }
4178 }
4068 } else {
4069 /*
4070 ** Either no header split, or a
4071 ** secondary piece of a fragmented
4179 } else {
4180 /*
4181 ** Either no header split, or a
4182 ** secondary piece of a fragmented
4072 ** split packet.
4183 ** packet.
4073 */
4184 */
4074 mh = rxr->rx_buffers[i].m_pack;
4075 mh->m_flags |= M_PKTHDR;
4076 mh->m_len = le16toh(cur->wb.upper.length);
4077 dopayload = IGB_CLEAN_PAYLOAD;
4185 mp->m_len = le16toh(cur->wb.upper.length);
4186 rxr->rx_buffers[i].m_pack = NULL;
4187 /* stored head pointer */
4188 sendmp = mh->m_nextpkt;
4189 if (sendmp != NULL) {
4190 sendmp->m_pkthdr.len += mp->m_len;
4191 sendmp->m_nextpkt = NULL;
4192 } else {
4193 /* first desc of a non-ps chain */
4194 sendmp = mp;
4195 sendmp->m_flags |= M_PKTHDR;
4196 sendmp->m_pkthdr.len = mp->m_len;
4197 if (staterr & E1000_RXD_STAT_VP) {
4198 sendmp->m_pkthdr.ether_vtag = vtag;
4199 sendmp->m_flags |= M_VLANTAG;
4200 }
4201 }
4202 /* Carry head forward */
4203 if (eop == 0) {
4204 nh = rxr->rx_buffers[nextp].m_head;
4205 np = rxr->rx_buffers[nextp].m_pack;
4206 nh->m_nextpkt = sendmp;
4207 mp->m_next = np;
4208 sendmp = NULL;
4209 }
4210 mh->m_nextpkt = NULL;
4078 }
4079
4211 }
4212
4080 if (staterr & E1000_RXD_STAT_EOP) {
4081 count--;
4082 eop = 1;
4083 /*
4084 ** Strip CRC and account for frag
4085 */
4086 if (mp) {
4087 if (mp->m_len < ETHER_CRC_LEN) {
4088 /* a frag, how much is left? */
4089 len_adj = ETHER_CRC_LEN - mp->m_len;
4090 mp->m_len = 0;
4091 } else
4092 mp->m_len -= ETHER_CRC_LEN;
4093 } else { /* not split */
4094 if (mh->m_len < ETHER_CRC_LEN) {
4095 len_adj = ETHER_CRC_LEN - mh->m_len;
4096 mh->m_len = 0;
4097 } else
4098 mh->m_len -= ETHER_CRC_LEN;
4099 }
4100 } else
4101 eop = 0;
4102
4103 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
4213 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
4104 accept_frame = 0;
4105#ifdef IGB_IEEE1588
4106 This linux code needs to be converted to work here
4107 -----------------------------------------------------
4108 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4109 u64 regval;
4110 u64 ns;
4111// Create an mtag and set it up
4112 struct skb_shared_hwtstamps *shhwtstamps =
4113 skb_hwtstamps(skb);
4214 accept_frame = FALSE;
4114
4215
4115 rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4116 "igb: no RX time stamp available for time stamped packet");
4117 regval = rd32(E1000_RXSTMPL);
4118 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4119// Do time conversion from the register
4120 ns = timecounter_cyc2time(&adapter->clock, regval);
4121 clocksync_update(&adapter->sync, ns);
4122 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4123 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4124 shhwtstamps->syststamp =
4125 clocksync_hw2sys(&adapter->sync, ns);
4126 }
4127#endif
4128 if (accept_frame) {
4216 if (accept_frame) {
4129 /*
4130 ** get_buf will overwrite the writeback
4131 ** descriptor so save the VLAN tag now.
4132 */
4133 vtag = le16toh(cur->wb.upper.vlan);
4134 if (igb_get_buf(rxr, i, dopayload) != 0) {
4135 ifp->if_iqdrops++;
4136 goto discard;
4137 }
4138 /* Initial frame - setup */
4139 if (rxr->fmp == NULL) {
4140 mh->m_flags |= M_PKTHDR;
4141 mh->m_pkthdr.len = mh->m_len;
4142 rxr->fmp = mh; /* Store the first mbuf */
4143 rxr->lmp = mh;
4144 if (mp) { /* Add payload if split */
4145 mh->m_pkthdr.len += mp->m_len;
4146 rxr->lmp = mh->m_next;
4147 }
4148 } else {
4149 /* Chain mbuf's together */
4150 mh->m_flags &= ~M_PKTHDR;
4151 rxr->lmp->m_next = mh;
4152 rxr->lmp = rxr->lmp->m_next;
4153 rxr->fmp->m_pkthdr.len += mh->m_len;
4154 /* Adjust for CRC frag */
4155 if (len_adj) {
4156 rxr->lmp->m_len -= len_adj;
4157 rxr->fmp->m_pkthdr.len -= len_adj;
4158 }
4159 }
4160
4217 ++processed;
4161 if (eop) {
4218 if (eop) {
4162 bool sctp = ((ptype & 0x40) != 0);
4163 rxr->fmp->m_pkthdr.rcvif = ifp;
4219 --count;
4220 sendmp->m_pkthdr.rcvif = ifp;
4164 ifp->if_ipackets++;
4165 rxr->rx_packets++;
4166 /* capture data for AIM */
4221 ifp->if_ipackets++;
4222 rxr->rx_packets++;
4223 /* capture data for AIM */
4167 rxr->bytes += rxr->fmp->m_pkthdr.len;
4168 rxr->rx_bytes += rxr->fmp->m_pkthdr.len;
4169
4170 igb_rx_checksum(staterr, rxr->fmp, sctp);
4171 if (staterr & E1000_RXD_STAT_VP) {
4172 rxr->fmp->m_pkthdr.ether_vtag = vtag;
4173 rxr->fmp->m_flags |= M_VLANTAG;
4174 }
4224 rxr->bytes += sendmp->m_pkthdr.len;
 4225 rxr->rx_bytes += sendmp->m_pkthdr.len;
4226 if (ifp->if_capenable & IFCAP_RXCSUM)
4227 igb_rx_checksum(staterr, sendmp, sctp);
4228 else
4229 sendmp->m_pkthdr.csum_flags = 0;
4175#if __FreeBSD_version >= 800000
4230#if __FreeBSD_version >= 800000
4176 rxr->fmp->m_pkthdr.flowid = curcpu;
4177 rxr->fmp->m_flags |= M_FLOWID;
4231 /* Get the RSS Hash */
4232 sendmp->m_pkthdr.flowid =
4233 le32toh(cur->wb.lower.hi_dword.rss);
4235 sendmp->m_flags |= M_FLOWID;
4178#endif
4236#endif
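/*
 * [Editor's sketch] The added lines above replace the old curcpu
 * flowid with the RSS hash from the descriptor writeback
 * (cur->wb.lower.hi_dword.rss, little-endian), so the stack keeps a
 * flow on one queue/CPU.  A userland model; the struct is a trimmed,
 * hypothetical stand-in for the hardware descriptor:
 */
#include <stdint.h>

struct wb_desc_model {
	uint32_t rss_le;		/* RSS hash, little-endian */
};

static uint32_t
le32_to_host(uint32_t v)
{
	const uint8_t *p = (const uint8_t *)&v;

	return ((uint32_t)p[0] | (uint32_t)p[1] << 8 |
	    (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24);
}

static uint32_t
rx_flowid(const struct wb_desc_model *d)
{
	return (le32_to_host(d->rss_le));	/* same flow, same CPU */
}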
4179 sendmp = rxr->fmp;
4180 rxr->fmp = NULL;
4181 rxr->lmp = NULL;
4182 }
4183 } else {
4184 ifp->if_ierrors++;
4237 }
4238 } else {
4239 ifp->if_ierrors++;
4185discard:
4186 /* Reuse loaded DMA map and just update mbuf chain */
4240 /* Reuse loaded DMA map and just update mbuf chain */
4187 if (hlen) {
4188 mh = rxr->rx_buffers[i].m_head;
4189 mh->m_len = MHLEN;
4190 mh->m_next = NULL;
4191 }
4192 mp = rxr->rx_buffers[i].m_pack;
4241 mh->m_len = MHLEN;
4242 mh->m_flags |= M_PKTHDR;
4243 mh->m_next = NULL;
4193 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
4194 mp->m_data = mp->m_ext.ext_buf;
4244 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
4245 mp->m_data = mp->m_ext.ext_buf;
4246 if (mp->m_next) { /* Free chain */
4247 sendmp = mp->m_next;
 4248 m_freem(sendmp);
4249 }
4195 mp->m_next = NULL;
4196 if (adapter->max_frame_size <=
4197 (MCLBYTES - ETHER_ALIGN))
4198 m_adj(mp, ETHER_ALIGN);
4250 mp->m_next = NULL;
4251 if (adapter->max_frame_size <=
4252 (MCLBYTES - ETHER_ALIGN))
4253 m_adj(mp, ETHER_ALIGN);
4199 if (rxr->fmp != NULL) {
4200 /* handles the whole chain */
4201 m_freem(rxr->fmp);
4202 rxr->fmp = NULL;
4203 rxr->lmp = NULL;
4204 }
4205 sendmp = NULL;
4206 }
4254 sendmp = NULL;
4255 }
4207
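/*
 * [Editor's note] The discard path above deliberately keeps the
 * loaded DMA map and cluster, only resetting the mbuf bookkeeping.
 * The m_adj(mp, ETHER_ALIGN) shifts the frame start by 2 bytes so
 * the 14-byte Ethernet header ends on a 4-byte boundary and the IP
 * header lands aligned.  The arithmetic, as a checkable sketch:
 */
#include <assert.h>

#define ETHER_ALIGN	2	/* bytes of intentional offset */
#define ETHER_HDR_LEN	14

static void
align_example(void)
{
	/* Without the offset the IP header starts at byte 14
	 * (14 % 4 == 2); with it, at byte 16 (16 % 4 == 0). */
	assert((ETHER_ALIGN + ETHER_HDR_LEN) % 4 == 0);
}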
4208 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4209 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4210
4256 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4257 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4258
4211 rxr->last_cleaned = i; /* For updating tail */
4212
4213 /* Advance our pointers to the next descriptor. */
4259 rxr->last_cleaned = i; /* for updating tail */
4214 if (++i == adapter->num_rx_desc)
4215 i = 0;
4260 if (++i == adapter->num_rx_desc)
4261 i = 0;
4216
4262 /* Prefetch next descriptor */
4263 cur = &rxr->rx_base[i];
4264 prefetch(cur);
4265
4217 /*
4266 /*
4218 ** Note that we hold the RX lock thru
4219 ** the following call so this ring's
 4220 ** next_to_check is not going to change.
4267 ** Now send up to the stack,
4268 ** note that the RX lock is
4269 ** held thru this call.
4221 */
4270 */
4222 if (sendmp != NULL) {
4271 if (sendmp != NULL) {
4223 /*
4224 ** Send to the stack if:
4225 ** - LRO not enabled, or
4226 ** - no LRO resources, or
4227 ** - lro enqueue fails
4228 */
4229 if ((!rxr->lro_enabled) ||
4230 ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
4272 /*
4273 ** Send to the stack if:
4274 ** - LRO not enabled, or
4275 ** - no LRO resources, or
4276 ** - lro enqueue fails
4277 */
4278 if ((!rxr->lro_enabled) ||
4279 ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
4231 (*ifp->if_input)(ifp, sendmp);
4280 (*ifp->if_input)(ifp, sendmp);
4232 }
4233
4281 }
4282
4234 /* Get the next descriptor */
4235 cur = &rxr->rx_base[i];
4283 /* Replenish every 4 max */
4284 if (processed == 4) {
4285 igb_get_buf(rxr, rxr->next_to_check, i);
4286 processed = 0;
4287 E1000_WRITE_REG(&adapter->hw,
4288 E1000_RDT(rxr->me), rxr->last_cleaned);
4289 rxr->next_to_check = i;
4290 }
4291
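/*
 * [Editor's sketch] The "replenish every 4" logic above amortizes
 * the cost of the RDT tail write, which is an uncached MMIO access,
 * over small batches, with a final catch-up write after the loop.
 * A minimal model; write_tail() is a hypothetical stand-in for
 * E1000_WRITE_REG(hw, E1000_RDT(me), ...):
 */
#define REPLENISH_BATCH	4

static void
write_tail(int tail)
{
	(void)tail;	/* real driver: one PCI register write */
}

static void
clean_ring_model(int ndesc)
{
	int processed = 0, i;

	for (i = 0; i < ndesc; i++) {
		/* ...refresh the buffer for descriptor i here... */
		if (++processed == REPLENISH_BATCH) {
			write_tail(i);		/* batch boundary */
			processed = 0;
		}
	}
	if (processed != 0)
		write_tail(ndesc - 1);		/* final partial batch */
}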
4292 /* Next iteration */
4236 staterr = cur->wb.upper.status_error;
4237 }
4293 staterr = cur->wb.upper.status_error;
4294 }
4238 rxr->next_to_check = i;
4239
4295
4240 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
4241 E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->last_cleaned);
4296 /* Replenish remaining */
4297 if (processed != 0) {
4298 igb_get_buf(rxr, rxr->next_to_check, i);
4299 processed = 0;
4300 E1000_WRITE_REG(&adapter->hw,
4301 E1000_RDT(rxr->me), rxr->last_cleaned);
4302 }
4242
4303
4304 rxr->next_to_check = i;
4305
4243 /*
4244 * Flush any outstanding LRO work
4245 */
4246 while (!SLIST_EMPTY(&lro->lro_active)) {
4247 queued = SLIST_FIRST(&lro->lro_active);
4248 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4249 tcp_lro_flush(lro, queued);
4250 }
4251
4252 IGB_RX_UNLOCK(rxr);
4253
4254 /*
4306 /*
4307 * Flush any outstanding LRO work
4308 */
4309 while (!SLIST_EMPTY(&lro->lro_active)) {
4310 queued = SLIST_FIRST(&lro->lro_active);
4311 SLIST_REMOVE_HEAD(&lro->lro_active, next);
4312 tcp_lro_flush(lro, queued);
4313 }
4314
4315 IGB_RX_UNLOCK(rxr);
4316
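/*
 * [Editor's sketch] The LRO pattern used in this function: each
 * completed packet is offered to tcp_lro_rx(), anything LRO cannot
 * take goes straight to if_input(), and once the ring pass ends
 * every still-active session is flushed (the SLIST loop above) so
 * no data is held across the poll.  lro_enqueue()/stack_input()
 * below are hypothetical stand-ins, stubbed so the sketch compiles:
 */
struct pkt;

static int
lro_enqueue(struct pkt *p)
{
	(void)p;
	return (1);	/* stub: 0 = merged, nonzero = refused */
}

static void
stack_input(struct pkt *p)
{
	(void)p;	/* stub for (*ifp->if_input)(ifp, sendmp) */
}

static void
rx_dispatch(struct pkt *p, int lro_enabled, int lro_resources)
{
	if (!lro_enabled || !lro_resources || lro_enqueue(p) != 0)
		stack_input(p);	/* LRO refused it: send it up now */
}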
4317 /*
4255 ** We still have cleaning to do?
4256 ** Schedule another interrupt if so.
4318 ** Leaving with more to clean?
4319 ** then schedule another interrupt.
4257 */
4258 if (staterr & E1000_RXD_STAT_DD) {
4259 E1000_WRITE_REG(&adapter->hw, E1000_EICS, rxr->eims);
4260 return TRUE;
4261 }
4262
4263 return FALSE;
4264}
4265
4320 */
4321 if (staterr & E1000_RXD_STAT_DD) {
4322 E1000_WRITE_REG(&adapter->hw, E1000_EICS, rxr->eims);
4323 return TRUE;
4324 }
4325
4326 return FALSE;
4327}
4328
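/*
 * [Editor's sketch] The rescheduling idiom that ends igb_rxeof():
 * if the descriptor where the loop stopped already reports DD
 * (descriptor done), more completed packets are waiting, so the
 * handler re-raises its own MSI-X vector through EICS instead of
 * spinning under the lock.  raise_eics() is a hypothetical stand-in
 * for E1000_WRITE_REG(&adapter->hw, E1000_EICS, rxr->eims):
 */
#include <stdbool.h>
#include <stdint.h>

#define RXD_STAT_DD	0x01	/* illustrative value */

static void
raise_eics(uint32_t eims)
{
	(void)eims;	/* stub for the MMIO write */
}

static bool
rx_reschedule(uint32_t staterr, uint32_t eims)
{
	if (staterr & RXD_STAT_DD) {
		raise_eics(eims);	/* fire this queue's vector again */
		return (true);		/* more to clean */
	}
	return (false);
}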
4266
4267/*********************************************************************
4268 *
4269 * Verify that the hardware indicated that the checksum is valid.
 4270 * Inform the stack about the status of the checksum so that
 4271 * the stack doesn't spend time verifying the checksum.
4272 *
4273 *********************************************************************/
4274static void

--- 22 unchanged lines hidden ---

4297 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4298#if __FreeBSD_version >= 800000
4299 if (sctp) /* reassign */
4300 type = CSUM_SCTP_VALID;
4301#endif
4302 /* Did it pass? */
4303 if (!(errors & E1000_RXD_ERR_TCPE)) {
4304 mp->m_pkthdr.csum_flags |= type;
4329/*********************************************************************
4330 *
4331 * Verify that the hardware indicated that the checksum is valid.
 4332 * Inform the stack about the status of the checksum so that
 4333 * the stack doesn't spend time verifying the checksum.
4334 *
4335 *********************************************************************/
4336static void

--- 22 unchanged lines hidden ---

4359 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4360#if __FreeBSD_version >= 800000
4361 if (sctp) /* reassign */
4362 type = CSUM_SCTP_VALID;
4363#endif
4364 /* Did it pass? */
4365 if (!(errors & E1000_RXD_ERR_TCPE)) {
4366 mp->m_pkthdr.csum_flags |= type;
4305 if (!sctp)
4367 if (sctp == FALSE)
4306 mp->m_pkthdr.csum_data = htons(0xffff);
4307 }
4308 }
4309 return;
4310}
4311
4312/*
 4313 * This routine is run via a VLAN
4314 * config EVENT
4315 */
4316static void
4317igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4318{
4319 struct adapter *adapter = ifp->if_softc;
4320 u32 index, bit;
4321
4368 mp->m_pkthdr.csum_data = htons(0xffff);
4369 }
4370 }
4371 return;
4372}
4373
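/*
 * [Editor's sketch] What igb_rx_checksum() boils down to: translate
 * the descriptor's "checked" status bits and error bits into mbuf
 * csum_flags so the stack skips software verification.  Constant
 * values here are illustrative, not the e1000 definitions:
 */
#include <stdint.h>

#define RXD_STAT_L4CS	0x20	/* L4 checksum was checked by hw */
#define RXD_ERR_TCPE	0x01	/* ...and found bad */
#define CSUM_DATA_VALID	0x01
#define CSUM_PSEUDO_HDR	0x02

static uint16_t
rx_csum_flags(uint32_t staterr, uint32_t errors)
{
	if (!(staterr & RXD_STAT_L4CS))
		return (0);	/* not checked: stack must verify */
	if (errors & RXD_ERR_TCPE)
		return (0);	/* checked and bad: no valid flags */
	return (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
}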
4374/*
 4375 * This routine is run via a VLAN
4376 * config EVENT
4377 */
4378static void
4379igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4380{
4381 struct adapter *adapter = ifp->if_softc;
4382 u32 index, bit;
4383
4322 if (ifp->if_softc != arg) /* Not our event */
4384 if (ifp->if_softc != arg) /* Not our event */
4323 return;
4324
4325 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4326 return;
4327
4328 index = (vtag >> 5) & 0x7F;
4329 bit = vtag & 0x1F;
4330 igb_shadow_vfta[index] |= (1 << bit);

--- 7 unchanged lines hidden ---
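/*
 * [Editor's sketch] The VFTA indexing used by both VLAN handlers:
 * 4096 possible VLAN ids map onto 128 32-bit words, the high 7 bits
 * of the tag selecting the word and the low 5 bits the bit within
 * it.  A self-contained model with a worked example:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t shadow_vfta_model[128];

static void
vfta_set(uint16_t vtag, int on)
{
	uint32_t index = (vtag >> 5) & 0x7F;
	uint32_t bit = vtag & 0x1F;

	if (on)
		shadow_vfta_model[index] |= (1u << bit);
	else
		shadow_vfta_model[index] &= ~(1u << bit);
}

static void
vfta_example(void)
{
	/* VLAN 100: word 3 (100 >> 5), bit 4 (100 & 0x1F). */
	vfta_set(100, 1);
	assert(shadow_vfta_model[3] & (1u << 4));
}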

4338 * unconfig EVENT
4339 */
4340static void
4341igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4342{
4343 struct adapter *adapter = ifp->if_softc;
4344 u32 index, bit;
4345
4385 return;
4386
4387 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4388 return;
4389
4390 index = (vtag >> 5) & 0x7F;
4391 bit = vtag & 0x1F;
4392 igb_shadow_vfta[index] |= (1 << bit);

--- 7 unchanged lines hidden (view full) ---

4400 * unconfig EVENT
4401 */
4402static void
4403igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4404{
4405 struct adapter *adapter = ifp->if_softc;
4406 u32 index, bit;
4407
4346 if (ifp->if_softc != arg)
4408 if (ifp->if_softc != arg)
4347 return;
4348
4349 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4350 return;
4351
4352 index = (vtag >> 5) & 0x7F;
4353 bit = vtag & 0x1F;
4354 igb_shadow_vfta[index] &= ~(1 << bit);

--- 675 unchanged lines hidden ---
4409 return;
4410
4411 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4412 return;
4413
4414 index = (vtag >> 5) & 0x7F;
4415 bit = vtag & 0x1F;
4416 igb_shadow_vfta[index] &= ~(1 << bit);

--- 675 unchanged lines hidden ---