Side-by-side diff ("full compact" view) of sys/dev/ixgbe/if_ix.c
between revision 313388 (deleted side) and revision 320897 (added side).
1/******************************************************************************
2
1/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
18 this software without specific prior written permission.
19
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/if_ix.c 313388 2017-02-07 15:13:19Z rstone $*/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/if_ix.c 320897 2017-07-11 21:25:07Z erj $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
40#endif
41
42#include "ixgbe.h"
43
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
40#endif
41
42#include "ixgbe.h"
43
44#ifdef RSS
45#include <net/rss_config.h>
46#include <netinet/in_rss.h>
47#endif
44/************************************************************************
45 * Driver version
46 ************************************************************************/
47char ixgbe_driver_version[] = "3.2.12-k";
48
48
49/*********************************************************************
50 * Driver version
51 *********************************************************************/
52char ixgbe_driver_version[] = "3.1.13-k";
53
49
54
55/*********************************************************************
56 * PCI Device ID Table
50/************************************************************************
51 * PCI Device ID Table
57 *
52 *
58 * Used by probe to select devices to load on
59 * Last field stores an index into ixgbe_strings
60 * Last entry must be all 0s
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixgbe_strings
55 * Last entry must be all 0s
61 *
56 *
62 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
63 *********************************************************************/
64
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
65static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
66{
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
69 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
70 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
71 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
72 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},

--- 17 unchanged lines hidden (view full) ---

90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
59static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
60{
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},

--- 17 unchanged lines hidden (view full) ---

84 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
85 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
87 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
88 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
89 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
99 /* required last entry */
100 {0, 0, 0, 0, 0}
101};
102
105 /* required last entry */
106 {0, 0, 0, 0, 0}
107};
108
103/*********************************************************************
104 * Table of branding strings
105 *********************************************************************/
106
109/************************************************************************
110 * Table of branding strings
111 ************************************************************************/
107static char *ixgbe_strings[] = {
108 "Intel(R) PRO/10GbE PCI-Express Network Driver"
109};
110
112static char *ixgbe_strings[] = {
113 "Intel(R) PRO/10GbE PCI-Express Network Driver"
114};
115
111/*********************************************************************
112 * Function prototypes
113 *********************************************************************/
116/************************************************************************
117 * Function prototypes
118 ************************************************************************/
114static int ixgbe_probe(device_t);
115static int ixgbe_attach(device_t);
116static int ixgbe_detach(device_t);
117static int ixgbe_shutdown(device_t);
119static int ixgbe_probe(device_t);
120static int ixgbe_attach(device_t);
121static int ixgbe_detach(device_t);
122static int ixgbe_shutdown(device_t);
118static int ixgbe_suspend(device_t);
119static int ixgbe_resume(device_t);
123static int ixgbe_suspend(device_t);
124static int ixgbe_resume(device_t);
120static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
125static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
121static void ixgbe_init(void *);
122static void ixgbe_init_locked(struct adapter *);
126static void ixgbe_init(void *);
127static void ixgbe_init_locked(struct adapter *);
123static void ixgbe_stop(void *);
124#if __FreeBSD_version >= 1100036
128static void ixgbe_stop(void *);
129#if __FreeBSD_version >= 1100036
125static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
130static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
126#endif
131#endif
127static void ixgbe_add_media_types(struct adapter *);
132static void ixgbe_init_device_features(struct adapter *);
133static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
134static void ixgbe_add_media_types(struct adapter *);
128static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
129static int ixgbe_media_change(struct ifnet *);
135static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136static int ixgbe_media_change(struct ifnet *);
130static void ixgbe_identify_hardware(struct adapter *);
131static int ixgbe_allocate_pci_resources(struct adapter *);
137static int ixgbe_allocate_pci_resources(struct adapter *);
132static void ixgbe_get_slot_info(struct adapter *);
138static void ixgbe_get_slot_info(struct adapter *);
133static int ixgbe_allocate_msix(struct adapter *);
134static int ixgbe_allocate_legacy(struct adapter *);
139static int ixgbe_allocate_msix(struct adapter *);
140static int ixgbe_allocate_legacy(struct adapter *);
135static int ixgbe_setup_msix(struct adapter *);
136static void ixgbe_free_pci_resources(struct adapter *);
137static void ixgbe_local_timer(void *);
138static int ixgbe_setup_interface(device_t, struct adapter *);
139static void ixgbe_config_gpie(struct adapter *);
140static void ixgbe_config_dmac(struct adapter *);
141static void ixgbe_config_delay_values(struct adapter *);
142static void ixgbe_config_link(struct adapter *);
143static void ixgbe_check_wol_support(struct adapter *);
144static int ixgbe_setup_low_power_mode(struct adapter *);
145static void ixgbe_rearm_queues(struct adapter *, u64);
141static int ixgbe_configure_interrupts(struct adapter *);
142static void ixgbe_free_pci_resources(struct adapter *);
143static void ixgbe_local_timer(void *);
144static int ixgbe_setup_interface(device_t, struct adapter *);
145static void ixgbe_config_gpie(struct adapter *);
146static void ixgbe_config_dmac(struct adapter *);
147static void ixgbe_config_delay_values(struct adapter *);
148static void ixgbe_config_link(struct adapter *);
149static void ixgbe_check_wol_support(struct adapter *);
150static int ixgbe_setup_low_power_mode(struct adapter *);
151static void ixgbe_rearm_queues(struct adapter *, u64);
146
147static void ixgbe_initialize_transmit_units(struct adapter *);
148static void ixgbe_initialize_receive_units(struct adapter *);
152
153static void ixgbe_initialize_transmit_units(struct adapter *);
154static void ixgbe_initialize_receive_units(struct adapter *);
149static void ixgbe_enable_rx_drop(struct adapter *);
150static void ixgbe_disable_rx_drop(struct adapter *);
151static void ixgbe_initialize_rss_mapping(struct adapter *);
155static void ixgbe_enable_rx_drop(struct adapter *);
156static void ixgbe_disable_rx_drop(struct adapter *);
157static void ixgbe_initialize_rss_mapping(struct adapter *);
152
153static void ixgbe_enable_intr(struct adapter *);
154static void ixgbe_disable_intr(struct adapter *);
155static void ixgbe_update_stats_counters(struct adapter *);
156static void ixgbe_set_promisc(struct adapter *);
157static void ixgbe_set_multi(struct adapter *);
158static void ixgbe_update_link_status(struct adapter *);
158
159static void ixgbe_enable_intr(struct adapter *);
160static void ixgbe_disable_intr(struct adapter *);
161static void ixgbe_update_stats_counters(struct adapter *);
162static void ixgbe_set_promisc(struct adapter *);
163static void ixgbe_set_multi(struct adapter *);
164static void ixgbe_update_link_status(struct adapter *);
159static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
160static void ixgbe_configure_ivars(struct adapter *);
161static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
165static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166static void ixgbe_configure_ivars(struct adapter *);
167static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
162
168
163static void ixgbe_setup_vlan_hw_support(struct adapter *);
164static void ixgbe_register_vlan(void *, struct ifnet *, u16);
165static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
169static void ixgbe_setup_vlan_hw_support(struct adapter *);
170static void ixgbe_register_vlan(void *, struct ifnet *, u16);
171static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
166
172
167static void ixgbe_add_device_sysctls(struct adapter *);
173static void ixgbe_add_device_sysctls(struct adapter *);
168static void ixgbe_add_hw_stats(struct adapter *);
174static void ixgbe_add_hw_stats(struct adapter *);
169static int ixgbe_set_flowcntl(struct adapter *, int);
170static int ixgbe_set_advertise(struct adapter *, int);
175static int ixgbe_set_flowcntl(struct adapter *, int);
176static int ixgbe_set_advertise(struct adapter *, int);
177static int ixgbe_get_advertise(struct adapter *);
171
172/* Sysctl handlers */
178
179/* Sysctl handlers */
173static void ixgbe_set_sysctl_value(struct adapter *, const char *,
174 const char *, int *, int);
175static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
176static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
177static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
178static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
179static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
180static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
180static void ixgbe_set_sysctl_value(struct adapter *, const char *,
181 const char *, int *, int);
182static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
181#ifdef IXGBE_DEBUG
188#ifdef IXGBE_DEBUG
182static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
183static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
189static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
184#endif
191#endif
185static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
186static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
187static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
188static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
189static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
190static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
191static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);
192static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
192
193/* Support for pluggable optic modules */
199
200/* Support for pluggable optic modules */
194static bool ixgbe_sfp_probe(struct adapter *);
195static void ixgbe_setup_optics(struct adapter *);
201static bool ixgbe_sfp_probe(struct adapter *);
196
202
197/* Legacy (single vector interrupt handler */
198static void ixgbe_legacy_irq(void *);
203/* Legacy (single vector) interrupt handler */
204static void ixgbe_legacy_irq(void *);
199
205
200/* The MSI/X Interrupt handlers */
201static void ixgbe_msix_que(void *);
202static void ixgbe_msix_link(void *);
206/* The MSI/MSI-X Interrupt handlers */
207static void ixgbe_msix_que(void *);
208static void ixgbe_msix_link(void *);
203
204/* Deferred interrupt tasklets */
209
210/* Deferred interrupt tasklets */
205static void ixgbe_handle_que(void *, int);
206static void ixgbe_handle_link(void *, int);
207static void ixgbe_handle_msf(void *, int);
208static void ixgbe_handle_mod(void *, int);
209static void ixgbe_handle_phy(void *, int);
211static void ixgbe_handle_que(void *, int);
212static void ixgbe_handle_link(void *, int);
213static void ixgbe_handle_msf(void *, int);
214static void ixgbe_handle_mod(void *, int);
215static void ixgbe_handle_phy(void *, int);
210
216
211#ifdef IXGBE_FDIR
212static void ixgbe_reinit_fdir(void *, int);
213#endif
214
217
215#ifdef PCI_IOV
216static void ixgbe_ping_all_vfs(struct adapter *);
217static void ixgbe_handle_mbx(void *, int);
218static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
219static void ixgbe_uninit_iov(device_t);
220static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
221static void ixgbe_initialize_iov(struct adapter *);
222static void ixgbe_recalculate_max_frame(struct adapter *);
223static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
224#endif /* PCI_IOV */
225
226
227/*********************************************************************
218/************************************************************************
228 * FreeBSD Device Interface Entry Points
219 * FreeBSD Device Interface Entry Points
229 *********************************************************************/
230
220 ************************************************************************/
231static device_method_t ix_methods[] = {
232 /* Device interface */
233 DEVMETHOD(device_probe, ixgbe_probe),
234 DEVMETHOD(device_attach, ixgbe_attach),
235 DEVMETHOD(device_detach, ixgbe_detach),
236 DEVMETHOD(device_shutdown, ixgbe_shutdown),
237 DEVMETHOD(device_suspend, ixgbe_suspend),
238 DEVMETHOD(device_resume, ixgbe_resume),

--- 9 unchanged lines hidden (view full) ---

248 "ix", ix_methods, sizeof(struct adapter),
249};
250
251devclass_t ix_devclass;
252DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
253
254MODULE_DEPEND(ix, pci, 1, 1, 1);
255MODULE_DEPEND(ix, ether, 1, 1, 1);
221static device_method_t ix_methods[] = {
222 /* Device interface */
223 DEVMETHOD(device_probe, ixgbe_probe),
224 DEVMETHOD(device_attach, ixgbe_attach),
225 DEVMETHOD(device_detach, ixgbe_detach),
226 DEVMETHOD(device_shutdown, ixgbe_shutdown),
227 DEVMETHOD(device_suspend, ixgbe_suspend),
228 DEVMETHOD(device_resume, ixgbe_resume),

--- 9 unchanged lines hidden (view full) ---

238 "ix", ix_methods, sizeof(struct adapter),
239};
240
241devclass_t ix_devclass;
242DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
243
244MODULE_DEPEND(ix, pci, 1, 1, 1);
245MODULE_DEPEND(ix, ether, 1, 1, 1);
256#ifdef DEV_NETMAP
257MODULE_DEPEND(ix, netmap, 1, 1, 1);
246MODULE_DEPEND(ix, netmap, 1, 1, 1);
258#endif /* DEV_NETMAP */
259
260/*
247
248/*
261** TUNEABLE PARAMETERS:
262*/
249 * TUNEABLE PARAMETERS:
250 */
263
251
264static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
265 "IXGBE driver parameters");
252static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
266
267/*
253
254/*
268** AIM: Adaptive Interrupt Moderation
269** which means that the interrupt rate
270** is varied over time based on the
271** traffic for that interrupt vector
272*/
255 * AIM: Adaptive Interrupt Moderation
256 * which means that the interrupt rate
257 * is varied over time based on the
258 * traffic for that interrupt vector
259 */
273static int ixgbe_enable_aim = TRUE;
260static int ixgbe_enable_aim = TRUE;
274SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
261SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
275 "Enable adaptive interrupt moderation");
276
277static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
278SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
279 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
280
281/* How many packets rxeof tries to clean at a time */
282static int ixgbe_rx_process_limit = 256;
283SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
262 "Enable adaptive interrupt moderation");
263
264static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
265SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
266 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
267
268/* How many packets rxeof tries to clean at a time */
269static int ixgbe_rx_process_limit = 256;
270SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
284 &ixgbe_rx_process_limit, 0,
285 "Maximum number of received packets to process at a time,"
286 "-1 means unlimited");
271 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
287
288/* How many packets txeof tries to clean at a time */
289static int ixgbe_tx_process_limit = 256;
290SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
291 &ixgbe_tx_process_limit, 0,
272
273/* How many packets txeof tries to clean at a time */
274static int ixgbe_tx_process_limit = 256;
275SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
276 &ixgbe_tx_process_limit, 0,
292 "Maximum number of sent packets to process at a time,"
293 "-1 means unlimited");
277 "Maximum number of sent packets to process at a time, -1 means unlimited");
294
295/* Flow control setting, default to full */
296static int ixgbe_flow_control = ixgbe_fc_full;
297SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
298 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
299
300/* Advertise Speed, default to 0 (auto) */
301static int ixgbe_advertise_speed = 0;
302SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
303 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
304
305/*
278
279/* Flow control setting, default to full */
280static int ixgbe_flow_control = ixgbe_fc_full;
281SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
282 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
283
284/* Advertise Speed, default to 0 (auto) */
285static int ixgbe_advertise_speed = 0;
286SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
287 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
288
289/*
306** Smart speed setting, default to on
307** this only works as a compile option
308** right now as its during attach, set
309** this to 'ixgbe_smart_speed_off' to
310** disable.
311*/
290 * Smart speed setting, default to on
291 * this only works as a compile option
292 * right now as its during attach, set
293 * this to 'ixgbe_smart_speed_off' to
294 * disable.
295 */
312static int ixgbe_smart_speed = ixgbe_smart_speed_on;
313
314/*
296static int ixgbe_smart_speed = ixgbe_smart_speed_on;
297
298/*
315 * MSIX should be the default for best performance,
299 * MSI-X should be the default for best performance,
316 * but this allows it to be forced off for testing.
317 */
318static int ixgbe_enable_msix = 1;
319SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
320 "Enable MSI-X interrupts");
321
322/*
323 * Number of Queues, can be set to 0,
324 * it then autoconfigures based on the
325 * number of cpus with a max of 8. This
326 * can be overriden manually here.
327 */
328static int ixgbe_num_queues = 0;
329SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
330 "Number of queues to configure, 0 indicates autoconfigure");
331
332/*
300 * but this allows it to be forced off for testing.
301 */
302static int ixgbe_enable_msix = 1;
303SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
304 "Enable MSI-X interrupts");
305
306/*
307 * Number of Queues, can be set to 0,
308 * it then autoconfigures based on the
309 * number of cpus with a max of 8. This
310 * can be overriden manually here.
311 */
312static int ixgbe_num_queues = 0;
313SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
314 "Number of queues to configure, 0 indicates autoconfigure");
315
316/*
333** Number of TX descriptors per ring,
334** setting higher than RX as this seems
335** the better performing choice.
336*/
317 * Number of TX descriptors per ring,
318 * setting higher than RX as this seems
319 * the better performing choice.
320 */
337static int ixgbe_txd = PERFORM_TXD;
338SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
339 "Number of transmit descriptors per queue");
340
341/* Number of RX descriptors per ring */
342static int ixgbe_rxd = PERFORM_RXD;
343SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
344 "Number of receive descriptors per queue");
345
346/*
321static int ixgbe_txd = PERFORM_TXD;
322SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
323 "Number of transmit descriptors per queue");
324
325/* Number of RX descriptors per ring */
326static int ixgbe_rxd = PERFORM_RXD;
327SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
328 "Number of receive descriptors per queue");
329
330/*
347** Defining this on will allow the use
348** of unsupported SFP+ modules, note that
349** doing so you are on your own :)
350*/
331 * Defining this on will allow the use
332 * of unsupported SFP+ modules, note that
333 * doing so you are on your own :)
334 */
351static int allow_unsupported_sfp = FALSE;
335static int allow_unsupported_sfp = FALSE;
352TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
336SYSCTL_INT(_hw_ix, OID_AUTO, allow_unsupported_sfp, CTLFLAG_RDTUN,
337 &allow_unsupported_sfp, 0,
338 "Allow unsupported SFP modules...use at your own risk");
353
339
340/*
341 * Not sure if Flow Director is fully baked,
342 * so we'll default to turning it off.
343 */
344static int ixgbe_enable_fdir = 0;
345SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
346 "Enable Flow Director");
347
348/* Legacy Transmit (single queue) */
349static int ixgbe_enable_legacy_tx = 0;
350SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
351 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
352
353/* Receive-Side Scaling */
354static int ixgbe_enable_rss = 1;
355SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
356 "Enable Receive-Side Scaling (RSS)");
357
354/* Keep running tab on them for sanity check */
355static int ixgbe_total_ports;
356
358/* Keep running tab on them for sanity check */
359static int ixgbe_total_ports;
360
357#ifdef IXGBE_FDIR
358/*
359** Flow Director actually 'steals'
360** part of the packet buffer as its
361** filter pool, this variable controls
362** how much it uses:
363** 0 = 64K, 1 = 128K, 2 = 256K
364*/
365static int fdir_pballoc = 1;
366#endif
361static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
362static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
367
363
364MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
365
366/************************************************************************
367 * ixgbe_initialize_rss_mapping
368 ************************************************************************/
369static void
370ixgbe_initialize_rss_mapping(struct adapter *adapter)
371{
372 struct ixgbe_hw *hw = &adapter->hw;
373 u32 reta = 0, mrqc, rss_key[10];
374 int queue_id, table_size, index_mult;
375 int i, j;
376 u32 rss_hash_config;
377
378 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
379 /* Fetch the configured RSS key */
380 rss_getkey((uint8_t *)&rss_key);
381 } else {
382 /* set up random bits */
383 arc4rand(&rss_key, sizeof(rss_key), 0);
384 }
385
386 /* Set multiplier for RETA setup and table size based on MAC */
387 index_mult = 0x1;
388 table_size = 128;
389 switch (adapter->hw.mac.type) {
390 case ixgbe_mac_82598EB:
391 index_mult = 0x11;
392 break;
393 case ixgbe_mac_X550:
394 case ixgbe_mac_X550EM_x:
395 case ixgbe_mac_X550EM_a:
396 table_size = 512;
397 break;
398 default:
399 break;
400 }
401
402 /* Set up the redirection table */
403 for (i = 0, j = 0; i < table_size; i++, j++) {
404 if (j == adapter->num_queues)
405 j = 0;
406
407 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
408 /*
409 * Fetch the RSS bucket id for the given indirection
410 * entry. Cap it at the number of configured buckets
411 * (which is num_queues.)
412 */
413 queue_id = rss_get_indirection_to_bucket(i);
414 queue_id = queue_id % adapter->num_queues;
415 } else
416 queue_id = (j * index_mult);
417
418 /*
419 * The low 8 bits are for hash value (n+0);
420 * The next 8 bits are for hash value (n+1), etc.
421 */
422 reta = reta >> 8;
423 reta = reta | (((uint32_t)queue_id) << 24);
424 if ((i & 3) == 3) {
425 if (i < 128)
426 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
427 else
428 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
429 reta);
430 reta = 0;
431 }
432 }
433
434 /* Now fill our hash function seeds */
435 for (i = 0; i < 10; i++)
436 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
437
438 /* Perform hash on these packet types */
439 if (adapter->feat_en & IXGBE_FEATURE_RSS)
440 rss_hash_config = rss_gethashconfig();
441 else {
442 /*
443 * Disable UDP - IP fragments aren't currently being handled
444 * and so we end up with a mix of 2-tuple and 4-tuple
445 * traffic.
446 */
447 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
448 | RSS_HASHTYPE_RSS_TCP_IPV4
449 | RSS_HASHTYPE_RSS_IPV6
450 | RSS_HASHTYPE_RSS_TCP_IPV6
451 | RSS_HASHTYPE_RSS_IPV6_EX
452 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
453 }
454
455 mrqc = IXGBE_MRQC_RSSEN;
456 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
457 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
458 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
459 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
460 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
461 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
462 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
463 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
464 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
465 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
466 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
467 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
468 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
469 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
470 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
471 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
472 __func__);
473 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
474 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
475 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
476 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
477 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
478 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
479} /* ixgbe_initialize_rss_mapping */
480
481/************************************************************************
482 * ixgbe_initialize_receive_units - Setup receive registers and features.
483 ************************************************************************/
484#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
485
486static void
487ixgbe_initialize_receive_units(struct adapter *adapter)
488{
489 struct rx_ring *rxr = adapter->rx_rings;
490 struct ixgbe_hw *hw = &adapter->hw;
491 struct ifnet *ifp = adapter->ifp;
492 int i, j;
493 u32 bufsz, fctrl, srrctl, rxcsum;
494 u32 hlreg;
495
496 /*
497 * Make sure receives are disabled while
498 * setting up the descriptor ring
499 */
500 ixgbe_disable_rx(hw);
501
502 /* Enable broadcasts */
503 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
504 fctrl |= IXGBE_FCTRL_BAM;
505 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
506 fctrl |= IXGBE_FCTRL_DPF;
507 fctrl |= IXGBE_FCTRL_PMCF;
508 }
509 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
510
511 /* Set for Jumbo Frames? */
512 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
513 if (ifp->if_mtu > ETHERMTU)
514 hlreg |= IXGBE_HLREG0_JUMBOEN;
515 else
516 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
517
368#ifdef DEV_NETMAP
518#ifdef DEV_NETMAP
369/*
370 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
371 * be a reference on how to implement netmap support in a driver.
372 * Additional comments are in ixgbe_netmap.h .
373 *
374 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
375 * that extend the standard driver.
376 */
377#include <dev/netmap/ixgbe_netmap.h>
519 /* CRC stripping is conditional in Netmap */
520 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
521 (ifp->if_capenable & IFCAP_NETMAP) &&
522 !ix_crcstrip)
523 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
524 else
378#endif /* DEV_NETMAP */
525#endif /* DEV_NETMAP */
526 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
379
527
380static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
528 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
381
529
382/*********************************************************************
383 * Device identification routine
384 *
385 * ixgbe_probe determines if the driver should be loaded on
386 * adapter based on PCI vendor/device id of the adapter.
387 *
388 * return BUS_PROBE_DEFAULT on success, positive on failure
389 *********************************************************************/
530 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
531 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
390
532
391static int
392ixgbe_probe(device_t dev)
393{
394 ixgbe_vendor_info_t *ent;
533 for (i = 0; i < adapter->num_queues; i++, rxr++) {
534 u64 rdba = rxr->rxdma.dma_paddr;
535 j = rxr->me;
395
536
396 u16 pci_vendor_id = 0;
397 u16 pci_device_id = 0;
398 u16 pci_subvendor_id = 0;
399 u16 pci_subdevice_id = 0;
400 char adapter_name[256];
537 /* Setup the Base and Length of the Rx Descriptor Ring */
538 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
539 (rdba & 0x00000000ffffffffULL));
540 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
541 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
542 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
401
543
402 INIT_DEBUGOUT("ixgbe_probe: begin");
544 /* Set up the SRRCTL register */
545 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
546 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
547 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
548 srrctl |= bufsz;
549 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
403
550
404 pci_vendor_id = pci_get_vendor(dev);
405 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
406 return (ENXIO);
551 /*
552 * Set DROP_EN iff we have no flow control and >1 queue.
553 * Note that srrctl was cleared shortly before during reset,
554 * so we do not need to clear the bit, but do it just in case
555 * this code is moved elsewhere.
556 */
557 if (adapter->num_queues > 1 &&
558 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
559 srrctl |= IXGBE_SRRCTL_DROP_EN;
560 } else {
561 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
562 }
407
563
408 pci_device_id = pci_get_device(dev);
409 pci_subvendor_id = pci_get_subvendor(dev);
410 pci_subdevice_id = pci_get_subdevice(dev);
564 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
411
565
412 ent = ixgbe_vendor_info_array;
413 while (ent->vendor_id != 0) {
414 if ((pci_vendor_id == ent->vendor_id) &&
415 (pci_device_id == ent->device_id) &&
566 /* Setup the HW Rx Head and Tail Descriptor Pointers */
567 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
568 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
416
569
417 ((pci_subvendor_id == ent->subvendor_id) ||
418 (ent->subvendor_id == 0)) &&
570 /* Set the driver rx tail address */
571 rxr->tail = IXGBE_RDT(rxr->me);
572 }
419
573
420 ((pci_subdevice_id == ent->subdevice_id) ||
421 (ent->subdevice_id == 0))) {
422 sprintf(adapter_name, "%s, Version - %s",
423 ixgbe_strings[ent->index],
424 ixgbe_driver_version);
425 device_set_desc_copy(dev, adapter_name);
426 ++ixgbe_total_ports;
427 return (BUS_PROBE_DEFAULT);
574 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
575 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
576 | IXGBE_PSRTYPE_UDPHDR
577 | IXGBE_PSRTYPE_IPV4HDR
578 | IXGBE_PSRTYPE_IPV6HDR;
579 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
580 }
581
582 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
583
584 ixgbe_initialize_rss_mapping(adapter);
585
586 if (adapter->num_queues > 1) {
587 /* RSS and RX IPP Checksum are mutually exclusive */
588 rxcsum |= IXGBE_RXCSUM_PCSD;
589 }
590
591 if (ifp->if_capenable & IFCAP_RXCSUM)
592 rxcsum |= IXGBE_RXCSUM_PCSD;
593
594 /* This is useful for calculating UDP/IP fragment checksums */
595 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
596 rxcsum |= IXGBE_RXCSUM_IPPCSE;
597
598 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
599
600 return;
601} /* ixgbe_initialize_receive_units */
602
603/************************************************************************
604 * ixgbe_initialize_transmit_units - Enable transmit units.
605 ************************************************************************/
606static void
607ixgbe_initialize_transmit_units(struct adapter *adapter)
608{
609 struct tx_ring *txr = adapter->tx_rings;
610 struct ixgbe_hw *hw = &adapter->hw;
611
612 /* Setup the Base and Length of the Tx Descriptor Ring */
613 for (int i = 0; i < adapter->num_queues; i++, txr++) {
614 u64 tdba = txr->txdma.dma_paddr;
615 u32 txctrl = 0;
616 int j = txr->me;
617
618 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
619 (tdba & 0x00000000ffffffffULL));
620 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
621 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
622 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
623
624 /* Setup the HW Tx Head and Tail descriptor pointers */
625 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
626 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
627
628 /* Cache the tail address */
629 txr->tail = IXGBE_TDT(j);
630
631 /* Disable Head Writeback */
632 /*
633 * Note: for X550 series devices, these registers are actually
634 * prefixed with TPH_ isntead of DCA_, but the addresses and
635 * fields remain the same.
636 */
637 switch (hw->mac.type) {
638 case ixgbe_mac_82598EB:
639 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
640 break;
641 default:
642 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
643 break;
428 }
644 }
429 ent++;
645 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
646 switch (hw->mac.type) {
647 case ixgbe_mac_82598EB:
648 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
649 break;
650 default:
651 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
652 break;
653 }
654
430 }
655 }
431 return (ENXIO);
432}
433
656
434/*********************************************************************
435 * Device initialization routine
657 if (hw->mac.type != ixgbe_mac_82598EB) {
658 u32 dmatxctl, rttdcs;
659
660 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
661 dmatxctl |= IXGBE_DMATXCTL_TE;
662 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
663 /* Disable arbiter to set MTQC */
664 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
665 rttdcs |= IXGBE_RTTDCS_ARBDIS;
666 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
667 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
668 ixgbe_get_mtqc(adapter->iov_mode));
669 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
670 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
671 }
672
673 return;
674} /* ixgbe_initialize_transmit_units */
675
676/************************************************************************
677 * ixgbe_attach - Device initialization routine
436 *
678 *
437 * The attach entry point is called when the driver is being loaded.
438 * This routine identifies the type of hardware, allocates all resources
439 * and initializes the hardware.
679 * Called when the driver is being loaded.
680 * Identifies the type of hardware, allocates all resources
681 * and initializes the hardware.
440 *
682 *
441 * return 0 on success, positive on failure
442 *********************************************************************/
443
683 * return 0 on success, positive on failure
684 ************************************************************************/
444static int
445ixgbe_attach(device_t dev)
446{
685static int
686ixgbe_attach(device_t dev)
687{
447 struct adapter *adapter;
688 struct adapter *adapter;
448 struct ixgbe_hw *hw;
449 int error = 0;
689 struct ixgbe_hw *hw;
690 int error = 0;
450 u16 csum;
451 u32 ctrl_ext;
691 u32 ctrl_ext;
452
453 INIT_DEBUGOUT("ixgbe_attach: begin");
454
455 /* Allocate, clear, and link in our adapter structure */
456 adapter = device_get_softc(dev);
692
693 INIT_DEBUGOUT("ixgbe_attach: begin");
694
695 /* Allocate, clear, and link in our adapter structure */
696 adapter = device_get_softc(dev);
697 adapter->hw.back = adapter;
457 adapter->dev = dev;
458 hw = &adapter->hw;
459
698 adapter->dev = dev;
699 hw = &adapter->hw;
700
460#ifdef DEV_NETMAP
461 adapter->init_locked = ixgbe_init_locked;
462 adapter->stop_locked = ixgbe_stop;
463#endif
464
465 /* Core Lock Init*/
466 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
467
468 /* Set up the timer callout */
469 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
470
471 /* Determine hardware revision */
701 /* Core Lock Init*/
702 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
703
704 /* Set up the timer callout */
705 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
706
707 /* Determine hardware revision */
472 ixgbe_identify_hardware(adapter);
708 hw->vendor_id = pci_get_vendor(dev);
709 hw->device_id = pci_get_device(dev);
710 hw->revision_id = pci_get_revid(dev);
711 hw->subsystem_vendor_id = pci_get_subvendor(dev);
712 hw->subsystem_device_id = pci_get_subdevice(dev);
473
713
714 /*
715 * Make sure BUSMASTER is set
716 */
717 pci_enable_busmaster(dev);
718
474 /* Do base PCI setup - map BAR0 */
475 if (ixgbe_allocate_pci_resources(adapter)) {
476 device_printf(dev, "Allocation of PCI resources failed\n");
477 error = ENXIO;
478 goto err_out;
479 }
480
719 /* Do base PCI setup - map BAR0 */
720 if (ixgbe_allocate_pci_resources(adapter)) {
721 device_printf(dev, "Allocation of PCI resources failed\n");
722 error = ENXIO;
723 goto err_out;
724 }
725
726 /* let hardware know driver is loaded */
727 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
728 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
729 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
730
731 /*
732 * Initialize the shared code
733 */
734 if (ixgbe_init_shared_code(hw)) {
735 device_printf(dev, "Unable to initialize the shared code\n");
736 error = ENXIO;
737 goto err_out;
738 }
739
740 if (hw->mbx.ops.init_params)
741 hw->mbx.ops.init_params(hw);
742
743 hw->allow_unsupported_sfp = allow_unsupported_sfp;
744
745 /* Pick up the 82599 settings */
746 if (hw->mac.type != ixgbe_mac_82598EB) {
747 hw->phy.smart_speed = ixgbe_smart_speed;
748 adapter->num_segs = IXGBE_82599_SCATTER;
749 } else
750 adapter->num_segs = IXGBE_82598_SCATTER;
751
752 ixgbe_init_device_features(adapter);
753
754 if (ixgbe_configure_interrupts(adapter)) {
755 error = ENXIO;
756 goto err_out;
757 }
758
759 /* Allocate multicast array memory. */
760 adapter->mta = malloc(sizeof(*adapter->mta) *
761 MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
762 if (adapter->mta == NULL) {
763 device_printf(dev, "Can not allocate multicast setup array\n");
764 error = ENOMEM;
765 goto err_out;
766 }
767
768 /* Enable WoL (if supported) */
769 ixgbe_check_wol_support(adapter);
770
771 /* Register for VLAN events */
772 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
773 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
774 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
775 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
776
777 /* Verify adapter fan is still functional (if applicable) */
778 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
779 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
780 ixgbe_check_fan_failure(adapter, esdp, FALSE);
781 }
782
783 /* Ensure SW/FW semaphore is free */
784 ixgbe_init_swfw_semaphore(hw);
785
786 /* Enable EEE power saving */
787 if (adapter->feat_en & IXGBE_FEATURE_EEE)
788 hw->mac.ops.setup_eee(hw, TRUE);
789
790 /* Set an initial default flow control value */
791 hw->fc.requested_mode = ixgbe_flow_control;
792
481 /* Sysctls for limiting the amount of work done in the taskqueues */
482 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
483 "max number of rx packets to process",
484 &adapter->rx_process_limit, ixgbe_rx_process_limit);
485
486 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
487 "max number of tx packets to process",
793 /* Sysctls for limiting the amount of work done in the taskqueues */
794 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
795 "max number of rx packets to process",
796 &adapter->rx_process_limit, ixgbe_rx_process_limit);
797
798 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
799 "max number of tx packets to process",
488 &adapter->tx_process_limit, ixgbe_tx_process_limit);
800 &adapter->tx_process_limit, ixgbe_tx_process_limit);
489
490 /* Do descriptor calc and sanity checks */
491 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
492 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
493 device_printf(dev, "TXD config issue, using default!\n");
494 adapter->num_tx_desc = DEFAULT_TXD;
495 } else
496 adapter->num_tx_desc = ixgbe_txd;
497
498 /*
801
802 /* Do descriptor calc and sanity checks */
803 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
804 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
805 device_printf(dev, "TXD config issue, using default!\n");
806 adapter->num_tx_desc = DEFAULT_TXD;
807 } else
808 adapter->num_tx_desc = ixgbe_txd;
809
810 /*
499 ** With many RX rings it is easy to exceed the
500 ** system mbuf allocation. Tuning nmbclusters
501 ** can alleviate this.
502 */
811 * With many RX rings it is easy to exceed the
812 * system mbuf allocation. Tuning nmbclusters
813 * can alleviate this.
814 */
503 if (nmbclusters > 0) {
504 int s;
505 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
506 if (s > nmbclusters) {
815 if (nmbclusters > 0) {
816 int s;
817 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
818 if (s > nmbclusters) {
507 device_printf(dev, "RX Descriptors exceed "
508 "system mbuf max, using default instead!\n");
819 device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
509 ixgbe_rxd = DEFAULT_RXD;
510 }
511 }
512
513 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
514 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
515 device_printf(dev, "RXD config issue, using default!\n");
516 adapter->num_rx_desc = DEFAULT_RXD;
517 } else
518 adapter->num_rx_desc = ixgbe_rxd;
519
520 /* Allocate our TX/RX Queues */
521 if (ixgbe_allocate_queues(adapter)) {
522 error = ENOMEM;
523 goto err_out;
524 }
525
820 ixgbe_rxd = DEFAULT_RXD;
821 }
822 }
823
824 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
825 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
826 device_printf(dev, "RXD config issue, using default!\n");
827 adapter->num_rx_desc = DEFAULT_RXD;
828 } else
829 adapter->num_rx_desc = ixgbe_rxd;
830
831 /* Allocate our TX/RX Queues */
832 if (ixgbe_allocate_queues(adapter)) {
833 error = ENOMEM;
834 goto err_out;
835 }
836
526 /* Allocate multicast array memory. */
527 adapter->mta = malloc(sizeof(*adapter->mta) *
528 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
529 if (adapter->mta == NULL) {
530 device_printf(dev, "Can not allocate multicast setup array\n");
531 error = ENOMEM;
532 goto err_late;
533 }
534
535 /* Initialize the shared code */
536 hw->allow_unsupported_sfp = allow_unsupported_sfp;
537 error = ixgbe_init_shared_code(hw);
837 hw->phy.reset_if_overtemp = TRUE;
838 error = ixgbe_reset_hw(hw);
839 hw->phy.reset_if_overtemp = FALSE;
538 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
539 /*
840 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
841 /*
540 ** No optics in this port, set up
541 ** so the timer routine will probe
542 ** for later insertion.
543 */
842 * No optics in this port, set up
843 * so the timer routine will probe
844 * for later insertion.
845 */
544 adapter->sfp_probe = TRUE;
846 adapter->sfp_probe = TRUE;
545 error = 0;
847 error = IXGBE_SUCCESS;
546 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
547 device_printf(dev, "Unsupported SFP+ module detected!\n");
548 error = EIO;
549 goto err_late;
550 } else if (error) {
848 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
849 device_printf(dev, "Unsupported SFP+ module detected!\n");
850 error = EIO;
851 goto err_late;
852 } else if (error) {
551 device_printf(dev, "Unable to initialize the shared code\n");
853 device_printf(dev, "Hardware initialization failed\n");
552 error = EIO;
553 goto err_late;
554 }
555
556 /* Make sure we have a good EEPROM before we read from it */
854 error = EIO;
855 goto err_late;
856 }
857
858 /* Make sure we have a good EEPROM before we read from it */
557 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
859 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
558 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
559 error = EIO;
560 goto err_late;
561 }
562
860 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
861 error = EIO;
862 goto err_late;
863 }
864
563 error = ixgbe_init_hw(hw);
865 /* Setup OS specific network interface */
866 if (ixgbe_setup_interface(dev, adapter) != 0)
867 goto err_late;
868
869 if (adapter->feat_en & IXGBE_FEATURE_MSIX)
870 error = ixgbe_allocate_msix(adapter);
871 else
872 error = ixgbe_allocate_legacy(adapter);
873 if (error)
874 goto err_late;
875
876 error = ixgbe_start_hw(hw);
564 switch (error) {
565 case IXGBE_ERR_EEPROM_VERSION:
877 switch (error) {
878 case IXGBE_ERR_EEPROM_VERSION:
566 device_printf(dev, "This device is a pre-production adapter/"
567 "LOM. Please be aware there may be issues associated "
568 "with your hardware.\nIf you are experiencing problems "
569 "please contact your Intel or hardware representative "
570 "who provided you with this hardware.\n");
879 device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
571 break;
572 case IXGBE_ERR_SFP_NOT_SUPPORTED:
573 device_printf(dev, "Unsupported SFP+ Module\n");
574 error = EIO;
575 goto err_late;
576 case IXGBE_ERR_SFP_NOT_PRESENT:
577 device_printf(dev, "No SFP+ Module found\n");
578 /* falls thru */
579 default:
580 break;
581 }
582
880 break;
881 case IXGBE_ERR_SFP_NOT_SUPPORTED:
882 device_printf(dev, "Unsupported SFP+ Module\n");
883 error = EIO;
884 goto err_late;
885 case IXGBE_ERR_SFP_NOT_PRESENT:
886 device_printf(dev, "No SFP+ Module found\n");
887 /* falls thru */
888 default:
889 break;
890 }
891
583 /* hw.ix defaults init */
584 ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
585 ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
586 adapter->enable_aim = ixgbe_enable_aim;
587
588 if ((adapter->msix > 1) && (ixgbe_enable_msix))
589 error = ixgbe_allocate_msix(adapter);
590 else
591 error = ixgbe_allocate_legacy(adapter);
592 if (error)
593 goto err_late;
594
595 /* Enable the optics for 82599 SFP+ fiber */
596 ixgbe_enable_tx_laser(hw);
597
598 /* Enable power to the phy. */
599 ixgbe_set_phy_power(hw, TRUE);
600
892 /* Enable the optics for 82599 SFP+ fiber */
893 ixgbe_enable_tx_laser(hw);
894
895 /* Enable power to the phy. */
896 ixgbe_set_phy_power(hw, TRUE);
897
601 /* Setup OS specific network interface */
602 if (ixgbe_setup_interface(dev, adapter) != 0)
603 goto err_late;
604
605 /* Initialize statistics */
606 ixgbe_update_stats_counters(adapter);
607
898 /* Initialize statistics */
899 ixgbe_update_stats_counters(adapter);
900
608 /* Register for VLAN events */
609 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
610 ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
611 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
612 ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
613
614 /* Check PCIE slot type/speed/width */
901 /* Check PCIE slot type/speed/width */
615 ixgbe_get_slot_info(adapter);
616
902 ixgbe_get_slot_info(adapter);
903
617 /* Set an initial default flow control & dmac value */
618 adapter->fc = ixgbe_fc_full;
904 /*
905 * Do time init and sysctl init here, but
906 * only on the first port of a bypass adapter.
907 */
908 ixgbe_bypass_init(adapter);
909
910 /* Set an initial dmac value */
619 adapter->dmac = 0;
911 adapter->dmac = 0;
620 adapter->eee_enabled = 0;
912 /* Set initial advertised speeds (if applicable) */
913 adapter->advertise = ixgbe_get_advertise(adapter);
621
914
622#ifdef PCI_IOV
623 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
624 nvlist_t *pf_schema, *vf_schema;
915 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
916 ixgbe_define_iov_schemas(dev, &error);
625
917
626 hw->mbx.ops.init_params(hw);
627 pf_schema = pci_iov_schema_alloc_node();
628 vf_schema = pci_iov_schema_alloc_node();
629 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
630 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
631 IOV_SCHEMA_HASDEFAULT, TRUE);
632 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
633 IOV_SCHEMA_HASDEFAULT, FALSE);
634 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
635 IOV_SCHEMA_HASDEFAULT, FALSE);
636 error = pci_iov_attach(dev, pf_schema, vf_schema);
637 if (error != 0) {
638 device_printf(dev,
639 "Error %d setting up SR-IOV\n", error);
640 }
641 }
642#endif /* PCI_IOV */
643
644 /* Check for certain supported features */
645 ixgbe_check_wol_support(adapter);
646
647 /* Add sysctls */
648 ixgbe_add_device_sysctls(adapter);
649 ixgbe_add_hw_stats(adapter);
650
918 /* Add sysctls */
919 ixgbe_add_device_sysctls(adapter);
920 ixgbe_add_hw_stats(adapter);
921
651 /* let hardware know driver is loaded */
652 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
653 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
654 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
922 /* For Netmap */
923 adapter->init_locked = ixgbe_init_locked;
924 adapter->stop_locked = ixgbe_stop;
655
925
656#ifdef DEV_NETMAP
657 ixgbe_netmap_attach(adapter);
658#endif /* DEV_NETMAP */
926 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
927 ixgbe_netmap_attach(adapter);
928
659 INIT_DEBUGOUT("ixgbe_attach: end");
929 INIT_DEBUGOUT("ixgbe_attach: end");
930
660 return (0);
661
662err_late:
663 ixgbe_free_transmit_structures(adapter);
664 ixgbe_free_receive_structures(adapter);
931 return (0);
932
933err_late:
934 ixgbe_free_transmit_structures(adapter);
935 ixgbe_free_receive_structures(adapter);
936 free(adapter->queues, M_DEVBUF);
665err_out:
666 if (adapter->ifp != NULL)
667 if_free(adapter->ifp);
937err_out:
938 if (adapter->ifp != NULL)
939 if_free(adapter->ifp);
940 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
941 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
942 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
668 ixgbe_free_pci_resources(adapter);
943 ixgbe_free_pci_resources(adapter);
669 free(adapter->mta, M_DEVBUF);
944 free(adapter->mta, M_IXGBE);
945 IXGBE_CORE_LOCK_DESTROY(adapter);
946
670 return (error);
947 return (error);
671}
948} /* ixgbe_attach */
672
949
673/*********************************************************************
674 * Device removal routine
950/************************************************************************
951 * ixgbe_check_wol_support
675 *
952 *
676 * The detach entry point is called when the driver is being removed.
677 * This routine stops the adapter and deallocates all the resources
678 * that were allocated for driver operation.
953 * Checks whether the adapter's ports are capable of
954 * Wake On LAN by reading the adapter's NVM.
679 *
955 *
680 * return 0 on success, positive on failure
681 *********************************************************************/
682
683static int
684ixgbe_detach(device_t dev)
956 * Sets each port's hw->wol_enabled value depending
957 * on the value read here.
958 ************************************************************************/
959static void
960ixgbe_check_wol_support(struct adapter *adapter)
685{
961{
686 struct adapter *adapter = device_get_softc(dev);
687 struct ix_queue *que = adapter->queues;
688 struct tx_ring *txr = adapter->tx_rings;
689 u32 ctrl_ext;
962 struct ixgbe_hw *hw = &adapter->hw;
963 u16 dev_caps = 0;
690
964
691 INIT_DEBUGOUT("ixgbe_detach: begin");
965 /* Find out WoL support for port */
966 adapter->wol_support = hw->wol_enabled = 0;
967 ixgbe_get_device_caps(hw, &dev_caps);
968 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
969 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
970 hw->bus.func == 0))
971 adapter->wol_support = hw->wol_enabled = 1;
692
972
693 /* Make sure VLANS are not using driver */
694 if (adapter->ifp->if_vlantrunk != NULL) {
695 device_printf(dev,"Vlan in use, detach first\n");
696 return (EBUSY);
697 }
973 /* Save initial wake up filter configuration */
974 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
698
975
699#ifdef PCI_IOV
700 if (pci_iov_detach(dev) != 0) {
701 device_printf(dev, "SR-IOV in use; detach first.\n");
702 return (EBUSY);
703 }
704#endif /* PCI_IOV */
976 return;
977} /* ixgbe_check_wol_support */
705
978
706 ether_ifdetach(adapter->ifp);
707 /* Stop the adapter */
708 IXGBE_CORE_LOCK(adapter);
709 ixgbe_setup_low_power_mode(adapter);
710 IXGBE_CORE_UNLOCK(adapter);
979/************************************************************************
980 * ixgbe_setup_interface
981 *
982 * Setup networking device structure and register an interface.
983 ************************************************************************/
984static int
985ixgbe_setup_interface(device_t dev, struct adapter *adapter)
986{
987 struct ifnet *ifp;
711
988
712 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
713 if (que->tq) {
714#ifndef IXGBE_LEGACY_TX
715 taskqueue_drain(que->tq, &txr->txq_task);
716#endif
717 taskqueue_drain(que->tq, &que->que_task);
718 taskqueue_free(que->tq);
719 }
720 }
989 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
721
990
722 /* Drain the Link queue */
723 if (adapter->tq) {
724 taskqueue_drain(adapter->tq, &adapter->link_task);
725 taskqueue_drain(adapter->tq, &adapter->mod_task);
726 taskqueue_drain(adapter->tq, &adapter->msf_task);
727#ifdef PCI_IOV
728 taskqueue_drain(adapter->tq, &adapter->mbx_task);
991 ifp = adapter->ifp = if_alloc(IFT_ETHER);
992 if (ifp == NULL) {
993 device_printf(dev, "can not allocate ifnet structure\n");
994 return (-1);
995 }
996 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
997 ifp->if_baudrate = IF_Gbps(10);
998 ifp->if_init = ixgbe_init;
999 ifp->if_softc = adapter;
1000 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1001 ifp->if_ioctl = ixgbe_ioctl;
1002#if __FreeBSD_version >= 1100036
1003 if_setgetcounterfn(ifp, ixgbe_get_counter);
729#endif
1004#endif
730 taskqueue_drain(adapter->tq, &adapter->phy_task);
731#ifdef IXGBE_FDIR
732 taskqueue_drain(adapter->tq, &adapter->fdir_task);
1005#if __FreeBSD_version >= 1100045
1006 /* TSO parameters */
1007 ifp->if_hw_tsomax = 65518;
1008 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1009 ifp->if_hw_tsomaxsegsize = 2048;
733#endif
1010#endif
734 taskqueue_free(adapter->tq);
1011 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1012 ifp->if_start = ixgbe_legacy_start;
1013 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1014 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
1015 IFQ_SET_READY(&ifp->if_snd);
1016 ixgbe_start_locked = ixgbe_legacy_start_locked;
1017 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
1018 } else {
1019 ifp->if_transmit = ixgbe_mq_start;
1020 ifp->if_qflush = ixgbe_qflush;
1021 ixgbe_start_locked = ixgbe_mq_start_locked;
1022 ixgbe_ring_empty = drbr_empty;
735 }
736
1023 }
1024
737 /* let hardware know driver is unloading */
738 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
739 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
740 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1025 ether_ifattach(ifp, adapter->hw.mac.addr);
741
1026
742 /* Unregister VLAN events */
743 if (adapter->vlan_attach != NULL)
744 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
745 if (adapter->vlan_detach != NULL)
746 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
1027 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
747
1028
748 callout_drain(&adapter->timer);
749#ifdef DEV_NETMAP
750 netmap_detach(adapter->ifp);
751#endif /* DEV_NETMAP */
752 ixgbe_free_pci_resources(adapter);
753 bus_generic_detach(dev);
754 if_free(adapter->ifp);
1029 /*
1030 * Tell the upper layer(s) we support long frames.
1031 */
1032 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
755
1033
756 ixgbe_free_transmit_structures(adapter);
757 ixgbe_free_receive_structures(adapter);
758 free(adapter->mta, M_DEVBUF);
1034 /* Set capability flags */
1035 ifp->if_capabilities |= IFCAP_HWCSUM
1036 | IFCAP_HWCSUM_IPV6
1037 | IFCAP_TSO
1038 | IFCAP_LRO
1039 | IFCAP_VLAN_HWTAGGING
1040 | IFCAP_VLAN_HWTSO
1041 | IFCAP_VLAN_HWCSUM
1042 | IFCAP_JUMBO_MTU
1043 | IFCAP_VLAN_MTU
1044 | IFCAP_HWSTATS;
759
1045
760 IXGBE_CORE_LOCK_DESTROY(adapter);
761 return (0);
762}
1046 /* Enable the above capabilities by default */
1047 ifp->if_capenable = ifp->if_capabilities;
763
1048
764/*********************************************************************
765 *
766 * Shutdown entry point
767 *
768 **********************************************************************/
1049 /*
1050 * Don't turn this on by default, if vlans are
1051 * created on another pseudo device (eg. lagg)
1052 * then vlan events are not passed thru, breaking
1053 * operation, but with HW FILTER off it works. If
1054 * using vlans directly on the ixgbe driver you can
1055 * enable this and get full hardware tag filtering.
1056 */
1057 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
769
1058
770static int
771ixgbe_shutdown(device_t dev)
772{
773 struct adapter *adapter = device_get_softc(dev);
774 int error = 0;
1059 /*
1060 * Specify the media types supported by this adapter and register
1061 * callbacks to update media and link information
1062 */
1063 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1064 ixgbe_media_status);
775
1065
776 INIT_DEBUGOUT("ixgbe_shutdown: begin");
1066 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1067 ixgbe_add_media_types(adapter);
777
1068
778 IXGBE_CORE_LOCK(adapter);
779 error = ixgbe_setup_low_power_mode(adapter);
780 IXGBE_CORE_UNLOCK(adapter);
1069 /* Set autoselect media by default */
1070 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
781
1071
782 return (error);
783}
1072 return (0);
1073} /* ixgbe_setup_interface */
784
1074
785/**
786 * Methods for going from:
787 * D0 -> D3: ixgbe_suspend
788 * D3 -> D0: ixgbe_resume
789 */
790static int
791ixgbe_suspend(device_t dev)
1075#if __FreeBSD_version >= 1100036
1076/************************************************************************
1077 * ixgbe_get_counter
1078 ************************************************************************/
1079static uint64_t
1080ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
792{
1081{
793 struct adapter *adapter = device_get_softc(dev);
794 int error = 0;
1082 struct adapter *adapter;
1083 struct tx_ring *txr;
1084 uint64_t rv;
795
1085
796 INIT_DEBUGOUT("ixgbe_suspend: begin");
1086 adapter = if_getsoftc(ifp);
797
1087
798 IXGBE_CORE_LOCK(adapter);
1088 switch (cnt) {
1089 case IFCOUNTER_IPACKETS:
1090 return (adapter->ipackets);
1091 case IFCOUNTER_OPACKETS:
1092 return (adapter->opackets);
1093 case IFCOUNTER_IBYTES:
1094 return (adapter->ibytes);
1095 case IFCOUNTER_OBYTES:
1096 return (adapter->obytes);
1097 case IFCOUNTER_IMCASTS:
1098 return (adapter->imcasts);
1099 case IFCOUNTER_OMCASTS:
1100 return (adapter->omcasts);
1101 case IFCOUNTER_COLLISIONS:
1102 return (0);
1103 case IFCOUNTER_IQDROPS:
1104 return (adapter->iqdrops);
1105 case IFCOUNTER_OQDROPS:
1106 rv = 0;
1107 txr = adapter->tx_rings;
1108 for (int i = 0; i < adapter->num_queues; i++, txr++)
1109 rv += txr->br->br_drops;
1110 return (rv);
1111 case IFCOUNTER_IERRORS:
1112 return (adapter->ierrors);
1113 default:
1114 return (if_get_counter_default(ifp, cnt));
1115 }
1116} /* ixgbe_get_counter */
1117#endif
799
1118
800 error = ixgbe_setup_low_power_mode(adapter);
801
802 IXGBE_CORE_UNLOCK(adapter);
803
804 return (error);
805}
806
807static int
808ixgbe_resume(device_t dev)
1119/************************************************************************
1120 * ixgbe_add_media_types
1121 ************************************************************************/
1122static void
1123ixgbe_add_media_types(struct adapter *adapter)
809{
1124{
810 struct adapter *adapter = device_get_softc(dev);
811 struct ifnet *ifp = adapter->ifp;
812 struct ixgbe_hw *hw = &adapter->hw;
1125 struct ixgbe_hw *hw = &adapter->hw;
813 u32 wus;
1126 device_t dev = adapter->dev;
1127 u64 layer;
814
1128
815 INIT_DEBUGOUT("ixgbe_resume: begin");
1129 layer = adapter->phy_layer;
816
1130
817 IXGBE_CORE_LOCK(adapter);
1131 /* Media types with matching FreeBSD media defines */
1132 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1133 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1134 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1135 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1136 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1137 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1138 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1139 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
818
1140
819 /* Read & clear WUS register */
820 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
821 if (wus)
822 device_printf(dev, "Woken up by (WUS): %#010x\n",
823 IXGBE_READ_REG(hw, IXGBE_WUS));
824 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
825 /* And clear WUFC until next low-power transition */
826 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
1141 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1142 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1143 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1144 NULL);
827
1145
828 /*
829 * Required after D3->D0 transition;
830 * will re-advertise all previous advertised speeds
831 */
832 if (ifp->if_flags & IFF_UP)
833 ixgbe_init_locked(adapter);
1146 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1147 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1148 if (hw->phy.multispeed_fiber)
1149 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1150 NULL);
1151 }
1152 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1153 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1154 if (hw->phy.multispeed_fiber)
1155 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1156 NULL);
1157 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1158 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1159 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1160 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
834
1161
835 IXGBE_CORE_UNLOCK(adapter);
1162#ifdef IFM_ETH_XTYPE
1163 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1164 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1165 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1166 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1167 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1168 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1169 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1170 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1171#else
1172 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1173 device_printf(dev, "Media supported: 10GbaseKR\n");
1174 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1175 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1176 }
1177 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1178 device_printf(dev, "Media supported: 10GbaseKX4\n");
1179 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1180 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1181 }
1182 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1183 device_printf(dev, "Media supported: 1000baseKX\n");
1184 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1185 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1186 }
1187 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1188 device_printf(dev, "Media supported: 2500baseKX\n");
1189 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1190 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1191 }
1192#endif
1193 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1194 device_printf(dev, "Media supported: 1000baseBX\n");
836
1195
837 return (0);
838}
1196 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1197 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1198 0, NULL);
1199 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1200 }
839
1201
1202 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1203} /* ixgbe_add_media_types */
840
1204
841/*********************************************************************
842 * Ioctl entry point
843 *
844 * ixgbe_ioctl is called when the user wants to configure the
845 * interface.
846 *
847 * return 0 on success, positive on failure
848 **********************************************************************/
1205/************************************************************************
1206 * ixgbe_is_sfp
1207 ************************************************************************/
1208static inline bool
1209ixgbe_is_sfp(struct ixgbe_hw *hw)
1210{
1211 switch (hw->mac.type) {
1212 case ixgbe_mac_82598EB:
1213 if (hw->phy.type == ixgbe_phy_nl)
1214 return TRUE;
1215 return FALSE;
1216 case ixgbe_mac_82599EB:
1217 switch (hw->mac.ops.get_media_type(hw)) {
1218 case ixgbe_media_type_fiber:
1219 case ixgbe_media_type_fiber_qsfp:
1220 return TRUE;
1221 default:
1222 return FALSE;
1223 }
1224 case ixgbe_mac_X550EM_x:
1225 case ixgbe_mac_X550EM_a:
1226 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1227 return TRUE;
1228 return FALSE;
1229 default:
1230 return FALSE;
1231 }
1232} /* ixgbe_is_sfp */
849
1233
850static int
851ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1234/************************************************************************
1235 * ixgbe_config_link
1236 ************************************************************************/
1237static void
1238ixgbe_config_link(struct adapter *adapter)
852{
1239{
853 struct adapter *adapter = ifp->if_softc;
854 struct ifreq *ifr = (struct ifreq *) data;
855#if defined(INET) || defined(INET6)
856 struct ifaddr *ifa = (struct ifaddr *)data;
857#endif
858 int error = 0;
859 bool avoid_reset = FALSE;
1240 struct ixgbe_hw *hw = &adapter->hw;
1241 u32 autoneg, err = 0;
1242 bool sfp, negotiate;
860
1243
861 switch (command) {
1244 sfp = ixgbe_is_sfp(hw);
862
1245
863 case SIOCSIFADDR:
864#ifdef INET
865 if (ifa->ifa_addr->sa_family == AF_INET)
866 avoid_reset = TRUE;
867#endif
868#ifdef INET6
869 if (ifa->ifa_addr->sa_family == AF_INET6)
870 avoid_reset = TRUE;
871#endif
872 /*
873 ** Calling init results in link renegotiation,
874 ** so we avoid doing it when possible.
875 */
876 if (avoid_reset) {
877 ifp->if_flags |= IFF_UP;
878 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
879 ixgbe_init(adapter);
880#ifdef INET
881 if (!(ifp->if_flags & IFF_NOARP))
882 arp_ifinit(ifp, ifa);
883#endif
1246 if (sfp) {
1247 if (hw->phy.multispeed_fiber) {
1248 hw->mac.ops.setup_sfp(hw);
1249 ixgbe_enable_tx_laser(hw);
1250 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
884 } else
1251 } else
885 error = ether_ioctl(ifp, command, data);
886 break;
887 case SIOCSIFMTU:
888 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
889 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
890 error = EINVAL;
891 } else {
892 IXGBE_CORE_LOCK(adapter);
893 ifp->if_mtu = ifr->ifr_mtu;
894 adapter->max_frame_size =
895 ifp->if_mtu + IXGBE_MTU_HDR;
896 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
897 ixgbe_init_locked(adapter);
898#ifdef PCI_IOV
899 ixgbe_recalculate_max_frame(adapter);
900#endif
901 IXGBE_CORE_UNLOCK(adapter);
902 }
903 break;
904 case SIOCSIFFLAGS:
905 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
906 IXGBE_CORE_LOCK(adapter);
907 if (ifp->if_flags & IFF_UP) {
908 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
909 if ((ifp->if_flags ^ adapter->if_flags) &
910 (IFF_PROMISC | IFF_ALLMULTI)) {
911 ixgbe_set_promisc(adapter);
912 }
913 } else
914 ixgbe_init_locked(adapter);
915 } else
916 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
917 ixgbe_stop(adapter);
918 adapter->if_flags = ifp->if_flags;
919 IXGBE_CORE_UNLOCK(adapter);
920 break;
921 case SIOCADDMULTI:
922 case SIOCDELMULTI:
923 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
924 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
925 IXGBE_CORE_LOCK(adapter);
926 ixgbe_disable_intr(adapter);
927 ixgbe_set_multi(adapter);
928 ixgbe_enable_intr(adapter);
929 IXGBE_CORE_UNLOCK(adapter);
930 }
931 break;
932 case SIOCSIFMEDIA:
933 case SIOCGIFMEDIA:
934 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
935 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
936 break;
937 case SIOCSIFCAP:
938 {
939 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1252 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1253 } else {
1254 if (hw->mac.ops.check_link)
1255 err = ixgbe_check_link(hw, &adapter->link_speed,
1256 &adapter->link_up, FALSE);
1257 if (err)
1258 goto out;
1259 autoneg = hw->phy.autoneg_advertised;
1260 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1261 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1262 &negotiate);
1263 if (err)
1264 goto out;
1265 if (hw->mac.ops.setup_link)
1266 err = hw->mac.ops.setup_link(hw, autoneg,
1267 adapter->link_up);
1268 }
1269out:
940
1270
941 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
942 if (!mask)
943 break;
1271 return;
1272} /* ixgbe_config_link */
944
1273
945 /* HW cannot turn these on/off separately */
946 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
947 ifp->if_capenable ^= IFCAP_RXCSUM;
948 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
949 }
950 if (mask & IFCAP_TXCSUM)
951 ifp->if_capenable ^= IFCAP_TXCSUM;
952 if (mask & IFCAP_TXCSUM_IPV6)
953 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
954 if (mask & IFCAP_TSO4)
955 ifp->if_capenable ^= IFCAP_TSO4;
956 if (mask & IFCAP_TSO6)
957 ifp->if_capenable ^= IFCAP_TSO6;
958 if (mask & IFCAP_LRO)
959 ifp->if_capenable ^= IFCAP_LRO;
960 if (mask & IFCAP_VLAN_HWTAGGING)
961 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
962 if (mask & IFCAP_VLAN_HWFILTER)
963 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
964 if (mask & IFCAP_VLAN_HWTSO)
965 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1274/************************************************************************
1275 * ixgbe_update_stats_counters - Update board statistics counters.
1276 ************************************************************************/
1277static void
1278ixgbe_update_stats_counters(struct adapter *adapter)
1279{
1280 struct ixgbe_hw *hw = &adapter->hw;
1281 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1282 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1283 u64 total_missed_rx = 0;
966
1284
967 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
968 IXGBE_CORE_LOCK(adapter);
969 ixgbe_init_locked(adapter);
970 IXGBE_CORE_UNLOCK(adapter);
971 }
972 VLAN_CAPABILITIES(ifp);
973 break;
1285 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1286 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1287 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1288 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1289 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1290
1291 for (int i = 0; i < 16; i++) {
1292 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1293 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1294 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
974 }
1295 }
975#if __FreeBSD_version >= 1100036
976 case SIOCGI2C:
977 {
978 struct ixgbe_hw *hw = &adapter->hw;
979 struct ifi2creq i2c;
980 int i;
981 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
982 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
983 if (error != 0)
984 break;
985 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
986 error = EINVAL;
987 break;
988 }
989 if (i2c.len > sizeof(i2c.data)) {
990 error = EINVAL;
991 break;
992 }
1296 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1297 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1298 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
993
1299
994 for (i = 0; i < i2c.len; i++)
995 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
996 i2c.dev_addr, &i2c.data[i]);
997 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
998 break;
1300 /* Hardware workaround, gprc counts missed packets */
1301 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1302 stats->gprc -= missed_rx;
1303
1304 if (hw->mac.type != ixgbe_mac_82598EB) {
1305 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1306 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1307 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1308 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1309 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1310 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1311 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1312 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1313 } else {
1314 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1315 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1316 /* 82598 only has a counter in the high register */
1317 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1318 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1319 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
999 }
1320 }
1000#endif
1001 default:
1002 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
1003 error = ether_ioctl(ifp, command, data);
1004 break;
1005 }
1006
1321
1007 return (error);
1008}
1322 /*
1323 * Workaround: mprc hardware is incorrectly counting
1324 * broadcasts, so for now we subtract those.
1325 */
1326 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1327 stats->bprc += bprc;
1328 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1329 if (hw->mac.type == ixgbe_mac_82598EB)
1330 stats->mprc -= bprc;
1009
1331
1010/*
1011 * Set the various hardware offload abilities.
1012 *
1013 * This takes the ifnet's if_capenable flags (e.g. set by the user using
1014 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
1015 * mbuf offload flags the driver will understand.
1016 */
1017static void
1018ixgbe_set_if_hwassist(struct adapter *adapter)
1019{
1020 struct ifnet *ifp = adapter->ifp;
1021 struct ixgbe_hw *hw = &adapter->hw;
1332 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1333 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1334 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1335 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1336 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1337 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1022
1338
1023 ifp->if_hwassist = 0;
1024#if __FreeBSD_version >= 1000000
1025 if (ifp->if_capenable & IFCAP_TSO4)
1026 ifp->if_hwassist |= CSUM_IP_TSO;
1027 if (ifp->if_capenable & IFCAP_TSO6)
1028 ifp->if_hwassist |= CSUM_IP6_TSO;
1029 if (ifp->if_capenable & IFCAP_TXCSUM) {
1030 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
1031 if (hw->mac.type != ixgbe_mac_82598EB)
1032 ifp->if_hwassist |= CSUM_IP_SCTP;
1339 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1340 stats->lxontxc += lxon;
1341 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1342 stats->lxofftxc += lxoff;
1343 total = lxon + lxoff;
1344
1345 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1346 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1347 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1348 stats->gptc -= total;
1349 stats->mptc -= total;
1350 stats->ptc64 -= total;
1351 stats->gotc -= total * ETHER_MIN_LEN;
1352
1353 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1354 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1355 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1356 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1357 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1358 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1359 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1360 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1361 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1362 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1363 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1364 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1365 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1366 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1367 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1368 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1369 stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1370 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1371 /* Only read FCOE on 82599 */
1372 if (hw->mac.type != ixgbe_mac_82598EB) {
1373 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1374 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1375 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1376 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1377 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1033 }
1378 }
1034 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1035 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1036 if (hw->mac.type != ixgbe_mac_82598EB)
1037 ifp->if_hwassist |= CSUM_IP6_SCTP;
1038 }
1039#else
1040 if (ifp->if_capenable & IFCAP_TSO)
1041 ifp->if_hwassist |= CSUM_TSO;
1042 if (ifp->if_capenable & IFCAP_TXCSUM) {
1043 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1044 if (hw->mac.type != ixgbe_mac_82598EB)
1045 ifp->if_hwassist |= CSUM_SCTP;
1046 }
1047#endif
1048}
1049
1379
1050/*********************************************************************
1051 * Init entry point
1052 *
1053 * This routine is used in two ways. It is used by the stack as
1054 * init entry point in network interface structure. It is also used
1055 * by the driver as a hw/sw initialization routine to get to a
1056 * consistent state.
1057 *
1058 * return 0 on success, positive on failure
1059 **********************************************************************/
1060#define IXGBE_MHADD_MFS_SHIFT 16
1380 /* Fill out the OS statistics structure */
1381 IXGBE_SET_IPACKETS(adapter, stats->gprc);
1382 IXGBE_SET_OPACKETS(adapter, stats->gptc);
1383 IXGBE_SET_IBYTES(adapter, stats->gorc);
1384 IXGBE_SET_OBYTES(adapter, stats->gotc);
1385 IXGBE_SET_IMCASTS(adapter, stats->mprc);
1386 IXGBE_SET_OMCASTS(adapter, stats->mptc);
1387 IXGBE_SET_COLLISIONS(adapter, 0);
1388 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1389 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1390} /* ixgbe_update_stats_counters */
1061
1391
1392/************************************************************************
1393 * ixgbe_add_hw_stats
1394 *
1395 * Add sysctl variables, one per statistic, to the system.
1396 ************************************************************************/
1062static void
1397static void
1063ixgbe_init_locked(struct adapter *adapter)
1398ixgbe_add_hw_stats(struct adapter *adapter)
1064{
1399{
1065 struct ifnet *ifp = adapter->ifp;
1066 device_t dev = adapter->dev;
1067 struct ixgbe_hw *hw = &adapter->hw;
1068 struct tx_ring *txr;
1069 struct rx_ring *rxr;
1070 u32 txdctl, mhadd;
1071 u32 rxdctl, rxctrl;
1072 int err = 0;
1073#ifdef PCI_IOV
1074 enum ixgbe_iov_mode mode;
1075#endif
1400 device_t dev = adapter->dev;
1401 struct tx_ring *txr = adapter->tx_rings;
1402 struct rx_ring *rxr = adapter->rx_rings;
1403 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1404 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1405 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1406 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1407 struct sysctl_oid *stat_node, *queue_node;
1408 struct sysctl_oid_list *stat_list, *queue_list;
1076
1409
1077 mtx_assert(&adapter->core_mtx, MA_OWNED);
1078 INIT_DEBUGOUT("ixgbe_init_locked: begin");
1410#define QUEUE_NAME_LEN 32
1411 char namebuf[QUEUE_NAME_LEN];
1079
1412
1080 hw->adapter_stopped = FALSE;
1081 ixgbe_stop_adapter(hw);
1082 callout_stop(&adapter->timer);
1413 /* Driver Statistics */
1414 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1415 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1416 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1417 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1418 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1419 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1420 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1421 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1083
1422
1084#ifdef PCI_IOV
1085 mode = ixgbe_get_iov_mode(adapter);
1086 adapter->pool = ixgbe_max_vfs(mode);
1087 /* Queue indices may change with IOV mode */
1088 for (int i = 0; i < adapter->num_queues; i++) {
1089 adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
1090 adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
1423 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1424 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1425 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1426 CTLFLAG_RD, NULL, "Queue Name");
1427 queue_list = SYSCTL_CHILDREN(queue_node);
1428
1429 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1430 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1431 sizeof(&adapter->queues[i]),
1432 ixgbe_sysctl_interrupt_rate_handler, "IU",
1433 "Interrupt Rate");
1434 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1435 CTLFLAG_RD, &(adapter->queues[i].irqs),
1436 "irqs on this queue");
1437 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1438 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1439 ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1440 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1441 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1442 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1443 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1444 CTLFLAG_RD, &txr->tso_tx, "TSO");
1445 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1446 CTLFLAG_RD, &txr->no_tx_dma_setup,
1447 "Driver tx dma failure in xmit");
1448 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1449 CTLFLAG_RD, &txr->no_desc_avail,
1450 "Queue No Descriptor Available");
1451 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1452 CTLFLAG_RD, &txr->total_packets,
1453 "Queue Packets Transmitted");
1454 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1455 CTLFLAG_RD, &txr->br->br_drops,
1456 "Packets dropped in buf_ring");
1091 }
1457 }
1092#endif
1093 /* reprogram the RAR[0] in case user changed it. */
1094 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1095
1458
1096 /* Get the latest mac address, User can use a LAA */
1097 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1098 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
1099 hw->addr_ctrl.rar_used_count = 1;
1459 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1460 struct lro_ctrl *lro = &rxr->lro;
1100
1461
1101 /* Set hardware offload abilities from ifnet flags */
1102 ixgbe_set_if_hwassist(adapter);
1462 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1463 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1464 CTLFLAG_RD, NULL, "Queue Name");
1465 queue_list = SYSCTL_CHILDREN(queue_node);
1103
1466
1104 /* Prepare transmit descriptors and buffers */
1105 if (ixgbe_setup_transmit_structures(adapter)) {
1106 device_printf(dev, "Could not setup transmit structures\n");
1107 ixgbe_stop(adapter);
1108 return;
1467 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1468 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1469 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1470 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1471 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1472 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1473 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1474 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1475 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1476 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1477 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1478 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1479 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1480 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1481 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1482 CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1483 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1484 CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1109 }
1110
1485 }
1486
1111 ixgbe_init_hw(hw);
1112#ifdef PCI_IOV
1113 ixgbe_initialize_iov(adapter);
1114#endif
1115 ixgbe_initialize_transmit_units(adapter);
1487 /* MAC stats get their own sub node */
1116
1488
1117 /* Setup Multicast table */
1118 ixgbe_set_multi(adapter);
1489 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1490 CTLFLAG_RD, NULL, "MAC Statistics");
1491 stat_list = SYSCTL_CHILDREN(stat_node);
1119
1492
1120 /* Determine the correct mbuf pool, based on frame size */
1121 if (adapter->max_frame_size <= MCLBYTES)
1122 adapter->rx_mbuf_sz = MCLBYTES;
1123 else
1124 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1493 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1494 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1495 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1496 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1497 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1498 CTLFLAG_RD, &stats->errbc, "Byte Errors");
1499 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1500 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1501 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1502 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1503 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1504 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1505 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1506 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1507 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1508 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1125
1509
1126 /* Prepare receive descriptors and buffers */
1127 if (ixgbe_setup_receive_structures(adapter)) {
1128 device_printf(dev, "Could not setup receive structures\n");
1129 ixgbe_stop(adapter);
1130 return;
1131 }
1510 /* Flow Control stats */
1511 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1512 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1513 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1514 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1515 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1516 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1517 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1518 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1132
1519
1133 /* Configure RX settings */
1134 ixgbe_initialize_receive_units(adapter);
1520 /* Packet Reception Stats */
1521 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1522 CTLFLAG_RD, &stats->tor, "Total Octets Received");
1523 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1524 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1525 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1526 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1527 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1528 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1529 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1530 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1531 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1532 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1533 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1534 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1535 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1536 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1537 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1538 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1539 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1540 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1542 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1543 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1544 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1545 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1546 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1547 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1548 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1549 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1550 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1552 CTLFLAG_RD, &stats->rjc, "Received Jabber");
1553 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1554 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1555 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1556 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1557 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1558 CTLFLAG_RD, &stats->xec, "Checksum Errors");
1135
1559
1136 /* Enable SDP & MSIX interrupts based on adapter */
1137 ixgbe_config_gpie(adapter);
1560 /* Packet Transmission Stats */
1561 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1562 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1563 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1564 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1566 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1567 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1568 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1569 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1570 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1571 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1572 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1573 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1574 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1575 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1576 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1577 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1578 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1579 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1580 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1581 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1582 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1583 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1584 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1585} /* ixgbe_add_hw_stats */
1138
1586
1139 /* Set MTU size */
1140 if (ifp->if_mtu > ETHERMTU) {
1141 /* aka IXGBE_MAXFRS on 82599 and newer */
1142 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1143 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1144 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1145 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1146 }
1147
1148 /* Now enable all the queues */
1149 for (int i = 0; i < adapter->num_queues; i++) {
1150 txr = &adapter->tx_rings[i];
1151 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
1152 txdctl |= IXGBE_TXDCTL_ENABLE;
1153 /* Set WTHRESH to 8, burst writeback */
1154 txdctl |= (8 << 16);
1155 /*
1156 * When the internal queue falls below PTHRESH (32),
1157 * start prefetching as long as there are at least
1158 * HTHRESH (1) buffers ready. The values are taken
1159 * from the Intel linux driver 3.8.21.
1160 * Prefetching enables tx line rate even with 1 queue.
1161 */
1162 txdctl |= (32 << 0) | (1 << 8);
1163 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
1164 }
1587/************************************************************************
1588 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1589 *
1590 * Retrieves the TDH value from the hardware
1591 ************************************************************************/
1592static int
1593ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1594{
1595 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1596 int error;
1597 unsigned int val;
1165
1598
1166 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
1167 rxr = &adapter->rx_rings[i];
1168 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1169 if (hw->mac.type == ixgbe_mac_82598EB) {
1170 /*
1171 ** PTHRESH = 21
1172 ** HTHRESH = 4
1173 ** WTHRESH = 8
1174 */
1175 rxdctl &= ~0x3FFFFF;
1176 rxdctl |= 0x080420;
1177 }
1178 rxdctl |= IXGBE_RXDCTL_ENABLE;
1179 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
1180 for (; j < 10; j++) {
1181 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
1182 IXGBE_RXDCTL_ENABLE)
1183 break;
1184 else
1185 msec_delay(1);
1186 }
1187 wmb();
1188#ifdef DEV_NETMAP
1189 /*
1190 * In netmap mode, we must preserve the buffers made
1191 * available to userspace before the if_init()
1192 * (this is true by default on the TX side, because
1193 * init makes all buffers available to userspace).
1194 *
1195 * netmap_reset() and the device specific routines
1196 * (e.g. ixgbe_setup_receive_rings()) map these
1197 * buffers at the end of the NIC ring, so here we
1198 * must set the RDT (tail) register to make sure
1199 * they are not overwritten.
1200 *
1201 * In this driver the NIC ring starts at RDH = 0,
1202 * RDT points to the last slot available for reception (?),
1203 * so RDT = num_rx_desc - 1 means the whole ring is available.
1204 */
1205 if (ifp->if_capenable & IFCAP_NETMAP) {
1206 struct netmap_adapter *na = NA(adapter->ifp);
1207 struct netmap_kring *kring = &na->rx_rings[i];
1208 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1599 if (!txr)
1600 return (0);
1209
1601
1210 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
1211 } else
1212#endif /* DEV_NETMAP */
1213 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
1214 }
1602 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1603 error = sysctl_handle_int(oidp, &val, 0, req);
1604 if (error || !req->newptr)
1605 return error;
1215
1606
1216 /* Enable Receive engine */
1217 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1218 if (hw->mac.type == ixgbe_mac_82598EB)
1219 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1220 rxctrl |= IXGBE_RXCTRL_RXEN;
1221 ixgbe_enable_rx_dma(hw, rxctrl);
1607 return (0);
1608} /* ixgbe_sysctl_tdh_handler */
1222
1609
1223 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1610/************************************************************************
1611 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1612 *
1613 * Retrieves the TDT value from the hardware
1614 ************************************************************************/
1615static int
1616ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1617{
1618 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1619 int error;
1620 unsigned int val;
1224
1621
1225 /* Set up MSI/X routing */
1226 if (ixgbe_enable_msix) {
1227 ixgbe_configure_ivars(adapter);
1228 /* Set up auto-mask */
1229 if (hw->mac.type == ixgbe_mac_82598EB)
1230 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1231 else {
1232 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1233 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1234 }
1235 } else { /* Simple settings for Legacy/MSI */
1236 ixgbe_set_ivar(adapter, 0, 0, 0);
1237 ixgbe_set_ivar(adapter, 0, 0, 1);
1238 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1239 }
1622 if (!txr)
1623 return (0);
1240
1624
1241#ifdef IXGBE_FDIR
1242 /* Init Flow director */
1243 if (hw->mac.type != ixgbe_mac_82598EB) {
1244 u32 hdrm = 32 << fdir_pballoc;
1625 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1626 error = sysctl_handle_int(oidp, &val, 0, req);
1627 if (error || !req->newptr)
1628 return error;
1245
1629
1246 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1247 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1248 }
1249#endif
1630 return (0);
1631} /* ixgbe_sysctl_tdt_handler */
1250
1632
1251 /*
1252 * Check on any SFP devices that
1253 * need to be kick-started
1254 */
1255 if (hw->phy.type == ixgbe_phy_none) {
1256 err = hw->phy.ops.identify(hw);
1257 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1258 device_printf(dev,
1259 "Unsupported SFP+ module type was detected.\n");
1260 return;
1261 }
1262 }
1633/************************************************************************
1634 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1635 *
1636 * Retrieves the RDH value from the hardware
1637 ************************************************************************/
1638static int
1639ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1640{
1641 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1642 int error;
1643 unsigned int val;
1263
1644
1264 /* Set moderation on the Link interrupt */
1265 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1645 if (!rxr)
1646 return (0);
1266
1647
1267 /* Configure Energy Efficient Ethernet for supported devices */
1268 if (hw->mac.ops.setup_eee) {
1269 err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
1270 if (err)
1271 device_printf(dev, "Error setting up EEE: %d\n", err);
1272 }
1648 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1649 error = sysctl_handle_int(oidp, &val, 0, req);
1650 if (error || !req->newptr)
1651 return error;
1273
1652
1274 /* Enable power to the phy. */
1275 ixgbe_set_phy_power(hw, TRUE);
1653 return (0);
1654} /* ixgbe_sysctl_rdh_handler */
1276
1655
1277 /* Config/Enable Link */
1278 ixgbe_config_link(adapter);
1656/************************************************************************
1657 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1658 *
1659 * Retrieves the RDT value from the hardware
1660 ************************************************************************/
1661static int
1662ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1663{
1664 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1665 int error;
1666 unsigned int val;
1279
1667
1280 /* Hardware Packet Buffer & Flow Control setup */
1281 ixgbe_config_delay_values(adapter);
1668 if (!rxr)
1669 return (0);
1282
1670
1283 /* Initialize the FC settings */
1284 ixgbe_start_hw(hw);
1671 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1672 error = sysctl_handle_int(oidp, &val, 0, req);
1673 if (error || !req->newptr)
1674 return error;
1285
1675
1286 /* Set up VLAN support and filter */
1287 ixgbe_setup_vlan_hw_support(adapter);
1676 return (0);
1677} /* ixgbe_sysctl_rdt_handler */
1288
1678
1289 /* Setup DMA Coalescing */
1290 ixgbe_config_dmac(adapter);
1679/************************************************************************
1680 * ixgbe_register_vlan
1681 *
1682 * Run via vlan config EVENT, it enables us to use the
1683 * HW Filter table since we can get the vlan id. This
1684 * just creates the entry in the soft version of the
1685 * VFTA, init will repopulate the real table.
1686 ************************************************************************/
1687static void
1688ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1689{
1690 struct adapter *adapter = ifp->if_softc;
1691 u16 index, bit;
1291
1692
1292 /* And now turn on interrupts */
1293 ixgbe_enable_intr(adapter);
1693 if (ifp->if_softc != arg) /* Not our event */
1694 return;
1294
1695
1295#ifdef PCI_IOV
1296 /* Enable the use of the MBX by the VF's */
1297 {
1298 u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1299 reg |= IXGBE_CTRL_EXT_PFRSTD;
1300 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
1301 }
1302#endif
1696 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1697 return;
1303
1698
1304 /* Now inform the stack we're ready */
1305 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1699 IXGBE_CORE_LOCK(adapter);
1700 index = (vtag >> 5) & 0x7F;
1701 bit = vtag & 0x1F;
1702 adapter->shadow_vfta[index] |= (1 << bit);
1703 ++adapter->num_vlans;
1704 ixgbe_setup_vlan_hw_support(adapter);
1705 IXGBE_CORE_UNLOCK(adapter);
1706} /* ixgbe_register_vlan */
1306
1707
1307 return;
1308}
1309
1708/************************************************************************
1709 * ixgbe_unregister_vlan
1710 *
1711 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1712 ************************************************************************/
1310static void
1713static void
1311ixgbe_init(void *arg)
1714ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1312{
1715{
1313 struct adapter *adapter = arg;
1716 struct adapter *adapter = ifp->if_softc;
1717 u16 index, bit;
1314
1718
1719 if (ifp->if_softc != arg)
1720 return;
1721
1722 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1723 return;
1724
1315 IXGBE_CORE_LOCK(adapter);
1725 IXGBE_CORE_LOCK(adapter);
1316 ixgbe_init_locked(adapter);
1726 index = (vtag >> 5) & 0x7F;
1727 bit = vtag & 0x1F;
1728 adapter->shadow_vfta[index] &= ~(1 << bit);
1729 --adapter->num_vlans;
1730 /* Re-init to load the changes */
1731 ixgbe_setup_vlan_hw_support(adapter);
1317 IXGBE_CORE_UNLOCK(adapter);
1732 IXGBE_CORE_UNLOCK(adapter);
1318 return;
1319}
1733} /* ixgbe_unregister_vlan */
1320
1734
1735/************************************************************************
1736 * ixgbe_setup_vlan_hw_support
1737 ************************************************************************/
1321static void
1738static void
1322ixgbe_config_gpie(struct adapter *adapter)
1739ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1323{
1740{
1741 struct ifnet *ifp = adapter->ifp;
1324 struct ixgbe_hw *hw = &adapter->hw;
1742 struct ixgbe_hw *hw = &adapter->hw;
1325 u32 gpie;
1743 struct rx_ring *rxr;
1744 int i;
1745 u32 ctrl;
1326
1746
1327 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1328
1747
1329 /* Fan Failure Interrupt */
1330 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1331 gpie |= IXGBE_SDP1_GPIEN;
1332
1333 /*
1748 /*
1334 * Module detection (SDP2)
1335 * Media ready (SDP1)
1749 * We get here thru init_locked, meaning
1750 * a soft reset, this has already cleared
1751 * the VFTA and other state, so if there
1752 * have been no vlan's registered do nothing.
1336 */
1753 */
1337 if (hw->mac.type == ixgbe_mac_82599EB) {
1338 gpie |= IXGBE_SDP2_GPIEN;
1339 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1340 gpie |= IXGBE_SDP1_GPIEN;
1754 if (adapter->num_vlans == 0)
1755 return;
1756
1757 /* Setup the queues for vlans */
1758 for (i = 0; i < adapter->num_queues; i++) {
1759 rxr = &adapter->rx_rings[i];
1760 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1761 if (hw->mac.type != ixgbe_mac_82598EB) {
1762 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1763 ctrl |= IXGBE_RXDCTL_VME;
1764 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1765 }
1766 rxr->vtag_strip = TRUE;
1341 }
1342
1767 }
1768
1769 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1770 return;
1343 /*
1771 /*
1344 * Thermal Failure Detection (X540)
1345 * Link Detection (X552 SFP+, X552/X557-AT)
1772 * A soft reset zero's out the VFTA, so
1773 * we need to repopulate it now.
1346 */
1774 */
1347 if (hw->mac.type == ixgbe_mac_X540 ||
1348 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1349 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1350 gpie |= IXGBE_SDP0_GPIEN_X540;
1775 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1776 if (adapter->shadow_vfta[i] != 0)
1777 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1778 adapter->shadow_vfta[i]);
1351
1779
1352 if (adapter->msix > 1) {
1353 /* Enable Enhanced MSIX mode */
1354 gpie |= IXGBE_GPIE_MSIX_MODE;
1355 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1356 IXGBE_GPIE_OCD;
1780 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1781 /* Enable the Filter Table if enabled */
1782 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1783 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1784 ctrl |= IXGBE_VLNCTRL_VFE;
1357 }
1785 }
1786 if (hw->mac.type == ixgbe_mac_82598EB)
1787 ctrl |= IXGBE_VLNCTRL_VME;
1788 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1789} /* ixgbe_setup_vlan_hw_support */
1358
1790
1359 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1360 return;
1361}
1362
1363/*
1364 * Requires adapter->max_frame_size to be set.
1365 */
1791/************************************************************************
1792 * ixgbe_get_slot_info
1793 *
1794 * Get the width and transaction speed of
1795 * the slot this adapter is plugged into.
1796 ************************************************************************/
1366static void
1797static void
1367ixgbe_config_delay_values(struct adapter *adapter)
1798ixgbe_get_slot_info(struct adapter *adapter)
1368{
1799{
1369 struct ixgbe_hw *hw = &adapter->hw;
1370 u32 rxpb, frame, size, tmp;
1800 device_t dev = adapter->dev;
1801 struct ixgbe_hw *hw = &adapter->hw;
1802 u32 offset;
1803 u16 link;
1804 int bus_info_valid = TRUE;
1371
1805
1372 frame = adapter->max_frame_size;
1373
1374 /* Calculate High Water */
1375 switch (hw->mac.type) {
1376 case ixgbe_mac_X540:
1377 case ixgbe_mac_X550:
1378 case ixgbe_mac_X550EM_x:
1379 tmp = IXGBE_DV_X540(frame, frame);
1380 break;
1806 /* Some devices are behind an internal bridge */
1807 switch (hw->device_id) {
1808 case IXGBE_DEV_ID_82599_SFP_SF_QP:
1809 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1810 goto get_parent_info;
1381 default:
1811 default:
1382 tmp = IXGBE_DV(frame, frame);
1383 break;
1384 }
1812 break;
1813 }
1385 size = IXGBE_BT2KB(tmp);
1386 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1387 hw->fc.high_water[0] = rxpb - size;
1388
1814
1389 /* Now calculate Low Water */
1815 ixgbe_get_bus_info(hw);
1816
1817 /*
1818 * Some devices don't use PCI-E, but there is no need
1819 * to display "Unknown" for bus speed and width.
1820 */
1390 switch (hw->mac.type) {
1821 switch (hw->mac.type) {
1391 case ixgbe_mac_X540:
1392 case ixgbe_mac_X550:
1393 case ixgbe_mac_X550EM_x:
1822 case ixgbe_mac_X550EM_x:
1394 tmp = IXGBE_LOW_DV_X540(frame);
1395 break;
1823 case ixgbe_mac_X550EM_a:
1824 return;
1396 default:
1825 default:
1397 tmp = IXGBE_LOW_DV(frame);
1398 break;
1826 goto display;
1399 }
1827 }
1400 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1401
1828
1402 hw->fc.requested_mode = adapter->fc;
1403 hw->fc.pause_time = IXGBE_FC_PAUSE;
1404 hw->fc.send_xon = TRUE;
1405}
1829get_parent_info:
1830 /*
1831 * For the Quad port adapter we need to parse back
1832 * up the PCI tree to find the speed of the expansion
1833 * slot into which this adapter is plugged. A bit more work.
1834 */
1835 dev = device_get_parent(device_get_parent(dev));
1836#ifdef IXGBE_DEBUG
1837 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1838 pci_get_slot(dev), pci_get_function(dev));
1839#endif
1840 dev = device_get_parent(device_get_parent(dev));
1841#ifdef IXGBE_DEBUG
1842 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1843 pci_get_slot(dev), pci_get_function(dev));
1844#endif
1845 /* Now get the PCI Express Capabilities offset */
1846 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1847 /*
1848 * Hmm...can't get PCI-Express capabilities.
1849 * Falling back to default method.
1850 */
1851 bus_info_valid = FALSE;
1852 ixgbe_get_bus_info(hw);
1853 goto display;
1854 }
1855 /* ...and read the Link Status Register */
1856 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1857 ixgbe_set_pci_config_data_generic(hw, link);
1406
1858
1407/*
1408**
1409** MSIX Interrupt Handlers and Tasklets
1410**
1411*/
1859display:
1860 device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1861 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
1862 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
1863 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
1864 "Unknown"),
1865 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1866 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1867 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1868 "Unknown"));
1412
1869
1870 if (bus_info_valid) {
1871 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1872 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1873 (hw->bus.speed == ixgbe_bus_speed_2500))) {
1874 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1875 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1876 }
1877 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1878 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1879 (hw->bus.speed < ixgbe_bus_speed_8000))) {
1880 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
1881 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1882 }
1883 } else
1884 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1885
1886 return;
1887} /* ixgbe_get_slot_info */
1888
1889/************************************************************************
1890 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1891 ************************************************************************/
1413static inline void
1414ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1415{
1416 struct ixgbe_hw *hw = &adapter->hw;
1892static inline void
1893ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1894{
1895 struct ixgbe_hw *hw = &adapter->hw;
1417 u64 queue = (u64)(1 << vector);
1418 u32 mask;
1896 u64 queue = (u64)(1 << vector);
1897 u32 mask;
1419
1420 if (hw->mac.type == ixgbe_mac_82598EB) {
1898
1899 if (hw->mac.type == ixgbe_mac_82598EB) {
1421 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1422 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1900 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1901 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1423 } else {
1902 } else {
1424 mask = (queue & 0xFFFFFFFF);
1425 if (mask)
1426 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1427 mask = (queue >> 32);
1428 if (mask)
1429 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1903 mask = (queue & 0xFFFFFFFF);
1904 if (mask)
1905 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1906 mask = (queue >> 32);
1907 if (mask)
1908 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1430 }
1909 }
1431}
1910} /* ixgbe_enable_queue */
1432
1911
1912/************************************************************************
1913 * ixgbe_disable_queue
1914 ************************************************************************/
1433static inline void
1434ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1435{
1436 struct ixgbe_hw *hw = &adapter->hw;
1915static inline void
1916ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1917{
1918 struct ixgbe_hw *hw = &adapter->hw;
1437 u64 queue = (u64)(1 << vector);
1438 u32 mask;
1919 u64 queue = (u64)(1 << vector);
1920 u32 mask;
1439
1440 if (hw->mac.type == ixgbe_mac_82598EB) {
1921
1922 if (hw->mac.type == ixgbe_mac_82598EB) {
1441 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1442 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1923 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1924 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1443 } else {
1925 } else {
1444 mask = (queue & 0xFFFFFFFF);
1445 if (mask)
1446 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1447 mask = (queue >> 32);
1448 if (mask)
1449 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1926 mask = (queue & 0xFFFFFFFF);
1927 if (mask)
1928 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1929 mask = (queue >> 32);
1930 if (mask)
1931 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1450 }
1932 }
1451}
1933} /* ixgbe_disable_queue */
1452
1934
1453static void
1454ixgbe_handle_que(void *context, int pending)
1455{
1456 struct ix_queue *que = context;
1457 struct adapter *adapter = que->adapter;
1458 struct tx_ring *txr = que->txr;
1459 struct ifnet *ifp = adapter->ifp;
1460
1461 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1462 ixgbe_rxeof(que);
1463 IXGBE_TX_LOCK(txr);
1464 ixgbe_txeof(txr);
1465#ifndef IXGBE_LEGACY_TX
1466 if (!drbr_empty(ifp, txr->br))
1467 ixgbe_mq_start_locked(ifp, txr);
1468#else
1469 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1470 ixgbe_start_locked(txr, ifp);
1471#endif
1472 IXGBE_TX_UNLOCK(txr);
1473 }
1474
1475 /* Reenable this interrupt */
1476 if (que->res != NULL)
1477 ixgbe_enable_queue(adapter, que->msix);
1478 else
1479 ixgbe_enable_intr(adapter);
1480 return;
1481}
1482
1483
1484/*********************************************************************
1485 *
1486 * Legacy Interrupt Service routine
1487 *
1488 **********************************************************************/
1489
1490static void
1491ixgbe_legacy_irq(void *arg)
1492{
1493 struct ix_queue *que = arg;
1494 struct adapter *adapter = que->adapter;
1495 struct ixgbe_hw *hw = &adapter->hw;
1496 struct ifnet *ifp = adapter->ifp;
1497 struct tx_ring *txr = adapter->tx_rings;
1498 bool more;
1499 u32 reg_eicr;
1500
1501
1502 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1503
1504 ++que->irqs;
1505 if (reg_eicr == 0) {
1506 ixgbe_enable_intr(adapter);
1507 return;
1508 }
1509
1510 more = ixgbe_rxeof(que);
1511
1512 IXGBE_TX_LOCK(txr);
1513 ixgbe_txeof(txr);
1514#ifdef IXGBE_LEGACY_TX
1515 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1516 ixgbe_start_locked(txr, ifp);
1517#else
1518 if (!drbr_empty(ifp, txr->br))
1519 ixgbe_mq_start_locked(ifp, txr);
1520#endif
1521 IXGBE_TX_UNLOCK(txr);
1522
1523 /* Check for fan failure */
1524 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1525 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1526 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1527 "REPLACE IMMEDIATELY!!\n");
1528 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1529 }
1530
1531 /* Link status change */
1532 if (reg_eicr & IXGBE_EICR_LSC)
1533 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1534
1535 /* External PHY interrupt */
1536 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1537 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1538 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1539
1540 if (more)
1541 taskqueue_enqueue(que->tq, &que->que_task);
1542 else
1543 ixgbe_enable_intr(adapter);
1544 return;
1545}
1546
1547
1548/*********************************************************************
1549 *
1550 * MSIX Queue Interrupt Service routine
1551 *
1552 **********************************************************************/
1935/************************************************************************
1936 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1937 ************************************************************************/
1553void
1554ixgbe_msix_que(void *arg)
1555{
1938void
1939ixgbe_msix_que(void *arg)
1940{
1556 struct ix_queue *que = arg;
1941 struct ix_queue *que = arg;
1557 struct adapter *adapter = que->adapter;
1558 struct ifnet *ifp = adapter->ifp;
1942 struct adapter *adapter = que->adapter;
1943 struct ifnet *ifp = adapter->ifp;
1559 struct tx_ring *txr = que->txr;
1560 struct rx_ring *rxr = que->rxr;
1561 bool more;
1562 u32 newitr = 0;
1944 struct tx_ring *txr = que->txr;
1945 struct rx_ring *rxr = que->rxr;
1946 bool more;
1947 u32 newitr = 0;
1563
1564
1565 /* Protect against spurious interrupts */
1566 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1567 return;
1568
1569 ixgbe_disable_queue(adapter, que->msix);
1570 ++que->irqs;
1571
1572 more = ixgbe_rxeof(que);
1573
1574 IXGBE_TX_LOCK(txr);
1575 ixgbe_txeof(txr);
1948
1949
1950 /* Protect against spurious interrupts */
1951 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1952 return;
1953
1954 ixgbe_disable_queue(adapter, que->msix);
1955 ++que->irqs;
1956
1957 more = ixgbe_rxeof(que);
1958
1959 IXGBE_TX_LOCK(txr);
1960 ixgbe_txeof(txr);
1576#ifdef IXGBE_LEGACY_TX
1577 if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1578 ixgbe_start_locked(txr, ifp);
1579#else
1580 if (!drbr_empty(ifp, txr->br))
1581 ixgbe_mq_start_locked(ifp, txr);
1582#endif
1961 if (!ixgbe_ring_empty(ifp, txr->br))
1962 ixgbe_start_locked(ifp, txr);
1583 IXGBE_TX_UNLOCK(txr);
1584
1585 /* Do AIM now? */
1586
1587 if (adapter->enable_aim == FALSE)
1588 goto no_calc;
1589 /*
1963 IXGBE_TX_UNLOCK(txr);
1964
1965 /* Do AIM now? */
1966
1967 if (adapter->enable_aim == FALSE)
1968 goto no_calc;
1969 /*
1590 ** Do Adaptive Interrupt Moderation:
1591 ** - Write out last calculated setting
1592 ** - Calculate based on average size over
1593 ** the last interval.
1594 */
1595 if (que->eitr_setting)
1596 IXGBE_WRITE_REG(&adapter->hw,
1597 IXGBE_EITR(que->msix), que->eitr_setting);
1598
1599 que->eitr_setting = 0;
1970 * Do Adaptive Interrupt Moderation:
1971 * - Write out last calculated setting
1972 * - Calculate based on average size over
1973 * the last interval.
1974 */
1975 if (que->eitr_setting)
1976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1977 que->eitr_setting);
1600
1978
1601 /* Idle, do nothing */
1602 if ((txr->bytes == 0) && (rxr->bytes == 0))
1603 goto no_calc;
1604
1979 que->eitr_setting = 0;
1980
1981 /* Idle, do nothing */
1982 if ((txr->bytes == 0) && (rxr->bytes == 0))
1983 goto no_calc;
1984
1605 if ((txr->bytes) && (txr->packets))
1985 if ((txr->bytes) && (txr->packets))
1606 newitr = txr->bytes/txr->packets;
1986 newitr = txr->bytes/txr->packets;
1607 if ((rxr->bytes) && (rxr->packets))
1987 if ((rxr->bytes) && (rxr->packets))
1608 newitr = max(newitr,
1609 (rxr->bytes / rxr->packets));
1988 newitr = max(newitr, (rxr->bytes / rxr->packets));
1610 newitr += 24; /* account for hardware frame, crc */
1611
1612 /* set an upper boundary */
1613 newitr = min(newitr, 3000);
1614
1615 /* Be nice to the mid range */
1616 if ((newitr > 300) && (newitr < 1200))
1617 newitr = (newitr / 3);
1618 else
1619 newitr = (newitr / 2);
1620
1989 newitr += 24; /* account for hardware frame, crc */
1990
1991 /* set an upper boundary */
1992 newitr = min(newitr, 3000);
1993
1994 /* Be nice to the mid range */
1995 if ((newitr > 300) && (newitr < 1200))
1996 newitr = (newitr / 3);
1997 else
1998 newitr = (newitr / 2);
1999
1621 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1622 newitr |= newitr << 16;
1623 else
1624 newitr |= IXGBE_EITR_CNT_WDIS;
1625
1626 /* save for next interrupt */
1627 que->eitr_setting = newitr;
2000 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2001 newitr |= newitr << 16;
2002 else
2003 newitr |= IXGBE_EITR_CNT_WDIS;
1628
2004
1629 /* Reset state */
1630 txr->bytes = 0;
1631 txr->packets = 0;
1632 rxr->bytes = 0;
1633 rxr->packets = 0;
2005 /* save for next interrupt */
2006 que->eitr_setting = newitr;
1634
2007
2008 /* Reset state */
2009 txr->bytes = 0;
2010 txr->packets = 0;
2011 rxr->bytes = 0;
2012 rxr->packets = 0;
2013
1635no_calc:
1636 if (more)
1637 taskqueue_enqueue(que->tq, &que->que_task);
1638 else
1639 ixgbe_enable_queue(adapter, que->msix);
2014no_calc:
2015 if (more)
2016 taskqueue_enqueue(que->tq, &que->que_task);
2017 else
2018 ixgbe_enable_queue(adapter, que->msix);
1640 return;
1641}
1642
2019
1643
1644static void
1645ixgbe_msix_link(void *arg)
1646{
1647 struct adapter *adapter = arg;
1648 struct ixgbe_hw *hw = &adapter->hw;
1649 u32 reg_eicr, mod_mask;
1650
1651 ++adapter->link_irq;
1652
1653 /* Pause other interrupts */
1654 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
1655
1656 /* First get the cause */
1657 reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1658 /* Be sure the queue bits are not cleared */
1659 reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1660 /* Clear interrupt with write */
1661 IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1662
1663 /* Link status change */
1664 if (reg_eicr & IXGBE_EICR_LSC) {
1665 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1666 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1667 }
1668
1669 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1670#ifdef IXGBE_FDIR
1671 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1672 /* This is probably overkill :) */
1673 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1674 return;
1675 /* Disable the interrupt */
1676 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1677 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1678 } else
1679#endif
1680 if (reg_eicr & IXGBE_EICR_ECC) {
1681 device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
1682 "Please Reboot!!\n");
1683 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1684 }
1685
1686 /* Check for over temp condition */
1687 if (reg_eicr & IXGBE_EICR_TS) {
1688 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
1689 "PHY IS SHUT DOWN!!\n");
1690 device_printf(adapter->dev, "System shutdown required!\n");
1691 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1692 }
1693#ifdef PCI_IOV
1694 if (reg_eicr & IXGBE_EICR_MAILBOX)
1695 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1696#endif
1697 }
1698
1699 /* Pluggable optics-related interrupt */
1700 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1701 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1702 else
1703 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1704
1705 if (ixgbe_is_sfp(hw)) {
1706 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1707 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1708 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1709 } else if (reg_eicr & mod_mask) {
1710 IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1711 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1712 }
1713 }
1714
1715 /* Check for fan failure */
1716 if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1717 (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1718 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1719 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1720 "REPLACE IMMEDIATELY!!\n");
1721 }
1722
1723 /* External PHY interrupt */
1724 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1725 (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1726 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1727 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1728 }
1729
1730 /* Re-enable other interrupts */
1731 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1732 return;
2020 return;
1733}
2021} /* ixgbe_msix_que */
1734
2022
1735/*********************************************************************
2023/************************************************************************
2024 * ixgbe_media_status - Media Ioctl callback
1736 *
2025 *
1737 * Media Ioctl callback
1738 *
1739 * This routine is called whenever the user queries the status of
1740 * the interface using ifconfig.
1741 *
1742 **********************************************************************/
2026 * Called whenever the user queries the status of
2027 * the interface using ifconfig.
2028 ************************************************************************/
1743static void
2029static void
1744ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2030ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1745{
2031{
1746 struct adapter *adapter = ifp->if_softc;
2032 struct adapter *adapter = ifp->if_softc;
1747 struct ixgbe_hw *hw = &adapter->hw;
2033 struct ixgbe_hw *hw = &adapter->hw;
1748 int layer;
2034 int layer;
1749
1750 INIT_DEBUGOUT("ixgbe_media_status: begin");
1751 IXGBE_CORE_LOCK(adapter);
1752 ixgbe_update_link_status(adapter);
1753
1754 ifmr->ifm_status = IFM_AVALID;
1755 ifmr->ifm_active = IFM_ETHER;
1756
1757 if (!adapter->link_active) {
1758 IXGBE_CORE_UNLOCK(adapter);
1759 return;
1760 }
1761
1762 ifmr->ifm_status |= IFM_ACTIVE;
1763 layer = adapter->phy_layer;
1764
1765 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1766 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2035
2036 INIT_DEBUGOUT("ixgbe_media_status: begin");
2037 IXGBE_CORE_LOCK(adapter);
2038 ixgbe_update_link_status(adapter);
2039
2040 ifmr->ifm_status = IFM_AVALID;
2041 ifmr->ifm_active = IFM_ETHER;
2042
2043 if (!adapter->link_active) {
2044 IXGBE_CORE_UNLOCK(adapter);
2045 return;
2046 }
2047
2048 ifmr->ifm_status |= IFM_ACTIVE;
2049 layer = adapter->phy_layer;
2050
2051 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2052 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1767 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2053 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2054 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1768 switch (adapter->link_speed) {
1769 case IXGBE_LINK_SPEED_10GB_FULL:
1770 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1771 break;
1772 case IXGBE_LINK_SPEED_1GB_FULL:
1773 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1774 break;
1775 case IXGBE_LINK_SPEED_100_FULL:
1776 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1777 break;
2055 switch (adapter->link_speed) {
2056 case IXGBE_LINK_SPEED_10GB_FULL:
2057 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2058 break;
2059 case IXGBE_LINK_SPEED_1GB_FULL:
2060 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2061 break;
2062 case IXGBE_LINK_SPEED_100_FULL:
2063 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2064 break;
2065 case IXGBE_LINK_SPEED_10_FULL:
2066 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2067 break;
1778 }
1779 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1780 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1781 switch (adapter->link_speed) {
1782 case IXGBE_LINK_SPEED_10GB_FULL:
1783 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1784 break;
1785 }

--- 27 unchanged lines hidden (view full) ---

1813 }
1814 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1815 switch (adapter->link_speed) {
1816 case IXGBE_LINK_SPEED_10GB_FULL:
1817 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1818 break;
1819 }
1820 /*
2068 }
2069 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2070 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2071 switch (adapter->link_speed) {
2072 case IXGBE_LINK_SPEED_10GB_FULL:
2073 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2074 break;
2075 }

--- 27 unchanged lines hidden (view full) ---

2103 }
2104 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2105 switch (adapter->link_speed) {
2106 case IXGBE_LINK_SPEED_10GB_FULL:
2107 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2108 break;
2109 }
2110 /*
1821 ** XXX: These need to use the proper media types once
1822 ** they're added.
1823 */
2111 * XXX: These need to use the proper media types once
2112 * they're added.
2113 */
1824#ifndef IFM_ETH_XTYPE
1825 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1826 switch (adapter->link_speed) {
1827 case IXGBE_LINK_SPEED_10GB_FULL:
1828 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1829 break;
1830 case IXGBE_LINK_SPEED_2_5GB_FULL:
1831 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1832 break;
1833 case IXGBE_LINK_SPEED_1GB_FULL:
1834 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1835 break;
1836 }
2114#ifndef IFM_ETH_XTYPE
2115 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2116 switch (adapter->link_speed) {
2117 case IXGBE_LINK_SPEED_10GB_FULL:
2118 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2119 break;
2120 case IXGBE_LINK_SPEED_2_5GB_FULL:
2121 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2122 break;
2123 case IXGBE_LINK_SPEED_1GB_FULL:
2124 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2125 break;
2126 }
1837 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1838 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2127 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2128 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2129 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1839 switch (adapter->link_speed) {
1840 case IXGBE_LINK_SPEED_10GB_FULL:
1841 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1842 break;
1843 case IXGBE_LINK_SPEED_2_5GB_FULL:
1844 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1845 break;
1846 case IXGBE_LINK_SPEED_1GB_FULL:

--- 8 unchanged lines hidden (view full) ---

1855 break;
1856 case IXGBE_LINK_SPEED_2_5GB_FULL:
1857 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1858 break;
1859 case IXGBE_LINK_SPEED_1GB_FULL:
1860 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1861 break;
1862 }
2130 switch (adapter->link_speed) {
2131 case IXGBE_LINK_SPEED_10GB_FULL:
2132 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2133 break;
2134 case IXGBE_LINK_SPEED_2_5GB_FULL:
2135 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2136 break;
2137 case IXGBE_LINK_SPEED_1GB_FULL:

--- 8 unchanged lines hidden (view full) ---

2146 break;
2147 case IXGBE_LINK_SPEED_2_5GB_FULL:
2148 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2149 break;
2150 case IXGBE_LINK_SPEED_1GB_FULL:
2151 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2152 break;
2153 }
1863 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1864 || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2154 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2155 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2156 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1865 switch (adapter->link_speed) {
1866 case IXGBE_LINK_SPEED_10GB_FULL:
1867 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1868 break;
1869 case IXGBE_LINK_SPEED_2_5GB_FULL:
1870 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1871 break;
1872 case IXGBE_LINK_SPEED_1GB_FULL:
1873 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1874 break;
1875 }
1876#endif
2157 switch (adapter->link_speed) {
2158 case IXGBE_LINK_SPEED_10GB_FULL:
2159 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2160 break;
2161 case IXGBE_LINK_SPEED_2_5GB_FULL:
2162 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2163 break;
2164 case IXGBE_LINK_SPEED_1GB_FULL:
2165 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2166 break;
2167 }
2168#endif
1877
2169
1878 /* If nothing is recognized... */
1879 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1880 ifmr->ifm_active |= IFM_UNKNOWN;
2170 /* If nothing is recognized... */
2171 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2172 ifmr->ifm_active |= IFM_UNKNOWN;
1881
2173
1882#if __FreeBSD_version >= 900025
1883 /* Display current flow control setting used on link */
1884 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1885 hw->fc.current_mode == ixgbe_fc_full)
1886 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1887 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1888 hw->fc.current_mode == ixgbe_fc_full)
1889 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1890#endif
1891
1892 IXGBE_CORE_UNLOCK(adapter);
1893
1894 return;
2174#if __FreeBSD_version >= 900025
2175 /* Display current flow control setting used on link */
2176 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2177 hw->fc.current_mode == ixgbe_fc_full)
2178 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2179 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2180 hw->fc.current_mode == ixgbe_fc_full)
2181 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2182#endif
2183
2184 IXGBE_CORE_UNLOCK(adapter);
2185
2186 return;
1895}
2187} /* ixgbe_media_status */
1896
2188
1897/*********************************************************************
2189/************************************************************************
2190 * ixgbe_media_change - Media Ioctl callback
1898 *
2191 *
1899 * Media Ioctl callback
1900 *
1901 * This routine is called when the user changes speed/duplex using
1902 * media/mediopt option with ifconfig.
1903 *
1904 **********************************************************************/
2192 * Called when the user changes speed/duplex using
2193 * media/mediopt option with ifconfig.
2194 ************************************************************************/
1905static int
2195static int
1906ixgbe_media_change(struct ifnet * ifp)
2196ixgbe_media_change(struct ifnet *ifp)
1907{
2197{
1908 struct adapter *adapter = ifp->if_softc;
1909 struct ifmedia *ifm = &adapter->media;
1910 struct ixgbe_hw *hw = &adapter->hw;
2198 struct adapter *adapter = ifp->if_softc;
2199 struct ifmedia *ifm = &adapter->media;
2200 struct ixgbe_hw *hw = &adapter->hw;
1911 ixgbe_link_speed speed = 0;
1912
1913 INIT_DEBUGOUT("ixgbe_media_change: begin");
1914
1915 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1916 return (EINVAL);
1917
1918 if (hw->phy.media_type == ixgbe_media_type_backplane)
1919 return (ENODEV);
1920
1921 /*
2201 ixgbe_link_speed speed = 0;
2202
2203 INIT_DEBUGOUT("ixgbe_media_change: begin");
2204
2205 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2206 return (EINVAL);
2207
2208 if (hw->phy.media_type == ixgbe_media_type_backplane)
2209 return (ENODEV);
2210
2211 /*
1922 ** We don't actually need to check against the supported
1923 ** media types of the adapter; ifmedia will take care of
1924 ** that for us.
1925 */
1926#ifndef IFM_ETH_XTYPE
2212 * We don't actually need to check against the supported
2213 * media types of the adapter; ifmedia will take care of
2214 * that for us.
2215 */
1927 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1928 case IFM_AUTO:
1929 case IFM_10G_T:
1930 speed |= IXGBE_LINK_SPEED_100_FULL;
2216 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2217 case IFM_AUTO:
2218 case IFM_10G_T:
2219 speed |= IXGBE_LINK_SPEED_100_FULL;
2220 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2221 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2222 break;
1931 case IFM_10G_LRM:
2223 case IFM_10G_LRM:
1932 case IFM_10G_SR: /* KR, too */
1933 case IFM_10G_LR:
2224 case IFM_10G_LR:
2225#ifndef IFM_ETH_XTYPE
2226 case IFM_10G_SR: /* KR, too */
1934 case IFM_10G_CX4: /* KX4 */
2227 case IFM_10G_CX4: /* KX4 */
2228#else
2229 case IFM_10G_KR:
2230 case IFM_10G_KX4:
2231#endif
1935 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2232 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1936 case IFM_10G_TWINAX:
1937 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1938 break;
2233 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2234 break;
1939 case IFM_1000_T:
1940 speed |= IXGBE_LINK_SPEED_100_FULL;
2235#ifndef IFM_ETH_XTYPE
2236 case IFM_1000_CX: /* KX */
2237#else
2238 case IFM_1000_KX:
2239#endif
1941 case IFM_1000_LX:
1942 case IFM_1000_SX:
2240 case IFM_1000_LX:
2241 case IFM_1000_SX:
1943 case IFM_1000_CX: /* KX */
1944 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1945 break;
2242 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2243 break;
1946 case IFM_100_TX:
2244 case IFM_1000_T:
1947 speed |= IXGBE_LINK_SPEED_100_FULL;
2245 speed |= IXGBE_LINK_SPEED_100_FULL;
1948 break;
1949 default:
1950 goto invalid;
1951 }
1952#else
1953 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1954 case IFM_AUTO:
1955 case IFM_10G_T:
1956 speed |= IXGBE_LINK_SPEED_100_FULL;
1957 case IFM_10G_LRM:
1958 case IFM_10G_KR:
1959 case IFM_10G_LR:
1960 case IFM_10G_KX4:
1961 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2246 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2247 break;
1962 case IFM_10G_TWINAX:
1963 speed |= IXGBE_LINK_SPEED_10GB_FULL;
1964 break;
2248 case IFM_10G_TWINAX:
2249 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2250 break;
1965 case IFM_1000_T:
1966 speed |= IXGBE_LINK_SPEED_100_FULL;
1967 case IFM_1000_LX:
1968 case IFM_1000_SX:
1969 case IFM_1000_KX:
1970 speed |= IXGBE_LINK_SPEED_1GB_FULL;
1971 break;
1972 case IFM_100_TX:
1973 speed |= IXGBE_LINK_SPEED_100_FULL;
1974 break;
2251 case IFM_100_TX:
2252 speed |= IXGBE_LINK_SPEED_100_FULL;
2253 break;
2254 case IFM_10_T:
2255 speed |= IXGBE_LINK_SPEED_10_FULL;
2256 break;
1975 default:
1976 goto invalid;
1977 }
2257 default:
2258 goto invalid;
2259 }
1978#endif
1979
1980 hw->mac.autotry_restart = TRUE;
1981 hw->mac.ops.setup_link(hw, speed, TRUE);
2260
2261 hw->mac.autotry_restart = TRUE;
2262 hw->mac.ops.setup_link(hw, speed, TRUE);
1982 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1983 adapter->advertise = 0;
1984 } else {
1985 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
1986 adapter->advertise |= 1 << 2;
1987 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
1988 adapter->advertise |= 1 << 1;
1989 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
1990 adapter->advertise |= 1 << 0;
1991 }
2263 adapter->advertise =
2264 ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2265 ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
2266 ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
2267 ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
1992
1993 return (0);
1994
1995invalid:
1996 device_printf(adapter->dev, "Invalid media type!\n");
2268
2269 return (0);
2270
2271invalid:
2272 device_printf(adapter->dev, "Invalid media type!\n");
2273
1997 return (EINVAL);
2274 return (EINVAL);
1998}
2275} /* ixgbe_media_change */
1999
2276
2277/************************************************************************
2278 * ixgbe_set_promisc
2279 ************************************************************************/
2000static void
2001ixgbe_set_promisc(struct adapter *adapter)
2002{
2280static void
2281ixgbe_set_promisc(struct adapter *adapter)
2282{
2003 u_int32_t reg_rctl;
2004 struct ifnet *ifp = adapter->ifp;
2005 int mcnt = 0;
2283 struct ifnet *ifp = adapter->ifp;
2284 int mcnt = 0;
2285 u32 rctl;
2006
2286
2007 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2008 reg_rctl &= (~IXGBE_FCTRL_UPE);
2287 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2288 rctl &= (~IXGBE_FCTRL_UPE);
2009 if (ifp->if_flags & IFF_ALLMULTI)
2010 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2011 else {
2289 if (ifp->if_flags & IFF_ALLMULTI)
2290 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2291 else {
2012 struct ifmultiaddr *ifma;
2292 struct ifmultiaddr *ifma;
2013#if __FreeBSD_version < 800000
2014 IF_ADDR_LOCK(ifp);
2015#else
2016 if_maddr_rlock(ifp);
2017#endif
2018 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2019 if (ifma->ifma_addr->sa_family != AF_LINK)
2020 continue;
2021 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2022 break;
2023 mcnt++;
2024 }
2025#if __FreeBSD_version < 800000
2026 IF_ADDR_UNLOCK(ifp);
2027#else
2028 if_maddr_runlock(ifp);
2029#endif
2030 }
2031 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2293#if __FreeBSD_version < 800000
2294 IF_ADDR_LOCK(ifp);
2295#else
2296 if_maddr_rlock(ifp);
2297#endif
2298 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2299 if (ifma->ifma_addr->sa_family != AF_LINK)
2300 continue;
2301 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2302 break;
2303 mcnt++;
2304 }
2305#if __FreeBSD_version < 800000
2306 IF_ADDR_UNLOCK(ifp);
2307#else
2308 if_maddr_runlock(ifp);
2309#endif
2310 }
2311 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2032 reg_rctl &= (~IXGBE_FCTRL_MPE);
2033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2312 rctl &= (~IXGBE_FCTRL_MPE);
2313 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2034
2035 if (ifp->if_flags & IFF_PROMISC) {
2314
2315 if (ifp->if_flags & IFF_PROMISC) {
2036 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2316 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2317 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2038 } else if (ifp->if_flags & IFF_ALLMULTI) {
2318 } else if (ifp->if_flags & IFF_ALLMULTI) {
2039 reg_rctl |= IXGBE_FCTRL_MPE;
2040 reg_rctl &= ~IXGBE_FCTRL_UPE;
2041 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2319 rctl |= IXGBE_FCTRL_MPE;
2320 rctl &= ~IXGBE_FCTRL_UPE;
2321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2042 }
2322 }
2043 return;
2044}
2323} /* ixgbe_set_promisc */
2045
2324
2046
2047/*********************************************************************
2048 * Multicast Update
2049 *
2050 * This routine is called whenever multicast address list is updated.
2051 *
2052 **********************************************************************/
2053#define IXGBE_RAR_ENTRIES 16
2054
2325/************************************************************************
2326 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2327 ************************************************************************/
2055static void
2328static void
2056ixgbe_set_multi(struct adapter *adapter)
2329ixgbe_msix_link(void *arg)
2057{
2330{
2058 u32 fctrl;
2059 u8 *update_ptr;
2060 struct ifmultiaddr *ifma;
2061 struct ixgbe_mc_addr *mta;
2062 int mcnt = 0;
2063 struct ifnet *ifp = adapter->ifp;
2331 struct adapter *adapter = arg;
2332 struct ixgbe_hw *hw = &adapter->hw;
2333 u32 eicr, eicr_mask;
2334 s32 retval;
2064
2335
2065 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2336 ++adapter->link_irq;
2066
2337
2067 mta = adapter->mta;
2068 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2338 /* Pause other interrupts */
2339 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2069
2340
2070#if __FreeBSD_version < 800000
2071 IF_ADDR_LOCK(ifp);
2072#else
2073 if_maddr_rlock(ifp);
2074#endif
2075 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2076 if (ifma->ifma_addr->sa_family != AF_LINK)
2077 continue;
2078 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2079 break;
2080 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2081 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2082 mta[mcnt].vmdq = adapter->pool;
2083 mcnt++;
2084 }
2085#if __FreeBSD_version < 800000
2086 IF_ADDR_UNLOCK(ifp);
2087#else
2088 if_maddr_runlock(ifp);
2089#endif
2341 /* First get the cause */
2342 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2343 /* Be sure the queue bits are not cleared */
2344 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2345 /* Clear interrupt with write */
2346 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2090
2347
2091 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2092 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2093 if (ifp->if_flags & IFF_PROMISC)
2094 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2095 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2096 ifp->if_flags & IFF_ALLMULTI) {
2097 fctrl |= IXGBE_FCTRL_MPE;
2098 fctrl &= ~IXGBE_FCTRL_UPE;
2099 } else
2100 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2101
2102 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2103
2104 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2105 update_ptr = (u8 *)mta;
2106 ixgbe_update_mc_addr_list(&adapter->hw,
2107 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2348 /* Link status change */
2349 if (eicr & IXGBE_EICR_LSC) {
2350 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2351 taskqueue_enqueue(adapter->tq, &adapter->link_task);
2108 }
2109
2352 }
2353
2110 return;
2111}
2354 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2355 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2356 (eicr & IXGBE_EICR_FLOW_DIR)) {
2357 /* This is probably overkill :) */
2358 if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2359 return;
2360 /* Disable the interrupt */
2361 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2362 taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
2363 }
2112
2364
2113/*
2114 * This is an iterator function now needed by the multicast
2115 * shared code. It simply feeds the shared code routine the
2116 * addresses in the array of ixgbe_set_multi() one by one.
2117 */
2118static u8 *
2119ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2120{
2121 struct ixgbe_mc_addr *mta;
2365 if (eicr & IXGBE_EICR_ECC) {
2366 device_printf(adapter->dev,
2367 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
2368 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2369 }
2122
2370
2123 mta = (struct ixgbe_mc_addr *)*update_ptr;
2124 *vmdq = mta->vmdq;
2371 /* Check for over temp condition */
2372 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2373 switch (adapter->hw.mac.type) {
2374 case ixgbe_mac_X550EM_a:
2375 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2376 break;
2377 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2378 IXGBE_EICR_GPI_SDP0_X550EM_a);
2379 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2380 IXGBE_EICR_GPI_SDP0_X550EM_a);
2381 retval = hw->phy.ops.check_overtemp(hw);
2382 if (retval != IXGBE_ERR_OVERTEMP)
2383 break;
2384 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2385 device_printf(adapter->dev, "System shutdown required!\n");
2386 break;
2387 default:
2388 if (!(eicr & IXGBE_EICR_TS))
2389 break;
2390 retval = hw->phy.ops.check_overtemp(hw);
2391 if (retval != IXGBE_ERR_OVERTEMP)
2392 break;
2393 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2394 device_printf(adapter->dev, "System shutdown required!\n");
2395 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2396 break;
2397 }
2398 }
2125
2399
2126 *update_ptr = (u8*)(mta + 1);
2127 return (mta->addr);
2128}
2400 /* Check for VF message */
2401 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2402 (eicr & IXGBE_EICR_MAILBOX))
2403 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
2404 }
2129
2405
2406 if (ixgbe_is_sfp(hw)) {
2407 /* Pluggable optics-related interrupt */
2408 if (hw->mac.type >= ixgbe_mac_X540)
2409 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2410 else
2411 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2130
2412
2131/*********************************************************************
2132 * Timer routine
2133 *
2134 * This routine checks for link status,updates statistics,
2135 * and runs the watchdog check.
2136 *
2137 **********************************************************************/
2413 if (eicr & eicr_mask) {
2414 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2415 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2416 }
2138
2417
2139static void
2140ixgbe_local_timer(void *arg)
2141{
2142 struct adapter *adapter = arg;
2143 device_t dev = adapter->dev;
2144 struct ix_queue *que = adapter->queues;
2145 u64 queues = 0;
2146 int hung = 0;
2147
2148 mtx_assert(&adapter->core_mtx, MA_OWNED);
2149
2150 /* Check for pluggable optics */
2151 if (adapter->sfp_probe)
2152 if (!ixgbe_sfp_probe(adapter))
2153 goto out; /* Nothing to do */
2154
2155 ixgbe_update_link_status(adapter);
2156 ixgbe_update_stats_counters(adapter);
2157
2158 /*
2159 ** Check the TX queues status
2160 ** - mark hung queues so we don't schedule on them
2161 ** - watchdog only if all queues show hung
2162 */
2163 for (int i = 0; i < adapter->num_queues; i++, que++) {
2164 /* Keep track of queues with work for soft irq */
2165 if (que->txr->busy)
2166 queues |= ((u64)1 << que->me);
2167 /*
2168 ** Each time txeof runs without cleaning, but there
2169 ** are uncleaned descriptors it increments busy. If
2170 ** we get to the MAX we declare it hung.
2171 */
2172 if (que->busy == IXGBE_QUEUE_HUNG) {
2173 ++hung;
2174 /* Mark the queue as inactive */
2175 adapter->active_queues &= ~((u64)1 << que->me);
2176 continue;
2177 } else {
2178 /* Check if we've come back from hung */
2179 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2180 adapter->active_queues |= ((u64)1 << que->me);
2418 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2419 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2420 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2421 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2422 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2181 }
2423 }
2182 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2183 device_printf(dev,"Warning queue %d "
2184 "appears to be hung!\n", i);
2185 que->txr->busy = IXGBE_QUEUE_HUNG;
2186 ++hung;
2187 }
2424 }
2188
2425
2426 /* Check for fan failure */
2427 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2428 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2429 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2189 }
2190
2430 }
2431
2191 /* Only truly watchdog if all queues show hung */
2192 if (hung == adapter->num_queues)
2193 goto watchdog;
2194 else if (queues != 0) { /* Force an IRQ on queues with work */
2195 ixgbe_rearm_queues(adapter, queues);
2432 /* External PHY interrupt */
2433 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2434 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2435 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2436 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
2196 }
2197
2437 }
2438
2198out:
2199 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
2200 return;
2439 /* Re-enable other interrupts */
2440 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2441} /* ixgbe_msix_link */
2201
2442
2202watchdog:
2203 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2204 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2205 adapter->watchdog_events++;
2206 ixgbe_init_locked(adapter);
2207}
2208
2209
2210/*
2211** Note: this routine updates the OS on the link state
2212** the real check of the hardware only happens with
2213** a link interrupt.
2214*/
2215static void
2216ixgbe_update_link_status(struct adapter *adapter)
2443/************************************************************************
2444 * ixgbe_sysctl_interrupt_rate_handler
2445 ************************************************************************/
2446static int
2447ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2217{
2448{
2218 struct ifnet *ifp = adapter->ifp;
2219 device_t dev = adapter->dev;
2449 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2450 int error;
2451 unsigned int reg, usec, rate;
2220
2452
2221 if (adapter->link_up){
2222 if (adapter->link_active == FALSE) {
2223 if (bootverbose)
2224 device_printf(dev,"Link is up %d Gbps %s \n",
2225 ((adapter->link_speed == 128)? 10:1),
2226 "Full Duplex");
2227 adapter->link_active = TRUE;
2228 /* Update any Flow Control changes */
2229 ixgbe_fc_enable(&adapter->hw);
2230 /* Update DMA coalescing config */
2231 ixgbe_config_dmac(adapter);
2232 if_link_state_change(ifp, LINK_STATE_UP);
2233#ifdef PCI_IOV
2234 ixgbe_ping_all_vfs(adapter);
2235#endif
2236 }
2237 } else { /* Link down */
2238 if (adapter->link_active == TRUE) {
2239 if (bootverbose)
2240 device_printf(dev,"Link is Down\n");
2241 if_link_state_change(ifp, LINK_STATE_DOWN);
2242 adapter->link_active = FALSE;
2243#ifdef PCI_IOV
2244 ixgbe_ping_all_vfs(adapter);
2245#endif
2246 }
2453 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2454 usec = ((reg & 0x0FF8) >> 3);
2455 if (usec > 0)
2456 rate = 500000 / usec;
2457 else
2458 rate = 0;
2459 error = sysctl_handle_int(oidp, &rate, 0, req);
2460 if (error || !req->newptr)
2461 return error;
2462 reg &= ~0xfff; /* default, no limitation */
2463 ixgbe_max_interrupt_rate = 0;
2464 if (rate > 0 && rate < 500000) {
2465 if (rate < 1000)
2466 rate = 1000;
2467 ixgbe_max_interrupt_rate = rate;
2468 reg |= ((4000000/rate) & 0xff8);
2247 }
2469 }
2470 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2248
2471
2249 return;
2250}
2472 return (0);
2473} /* ixgbe_sysctl_interrupt_rate_handler */
2251
2474
2252
2253/*********************************************************************
2254 *
2255 * This routine disables all traffic on the adapter by issuing a
2256 * global reset on the MAC and deallocates TX/RX buffers.
2257 *
2258 **********************************************************************/
2259
2475/************************************************************************
2476 * ixgbe_add_device_sysctls
2477 ************************************************************************/
2260static void
2478static void
2261ixgbe_stop(void *arg)
2479ixgbe_add_device_sysctls(struct adapter *adapter)
2262{
2480{
2263 struct ifnet *ifp;
2264 struct adapter *adapter = arg;
2265 struct ixgbe_hw *hw = &adapter->hw;
2266 ifp = adapter->ifp;
2481 device_t dev = adapter->dev;
2482 struct ixgbe_hw *hw = &adapter->hw;
2483 struct sysctl_oid_list *child;
2484 struct sysctl_ctx_list *ctx;
2267
2485
2268 mtx_assert(&adapter->core_mtx, MA_OWNED);
2486 ctx = device_get_sysctl_ctx(dev);
2487 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2269
2488
2270 INIT_DEBUGOUT("ixgbe_stop: begin\n");
2271 ixgbe_disable_intr(adapter);
2272 callout_stop(&adapter->timer);
2489 /* Sysctls for all devices */
2490 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
2491 adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
2273
2492
2274 /* Let the stack know...*/
2275 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2493 adapter->enable_aim = ixgbe_enable_aim;
2494 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2495 &adapter->enable_aim, 1, "Interrupt Moderation");
2276
2496
2277 ixgbe_reset_hw(hw);
2278 hw->adapter_stopped = FALSE;
2279 ixgbe_stop_adapter(hw);
2280 if (hw->mac.type == ixgbe_mac_82599EB)
2281 ixgbe_stop_mac_link_on_d3_82599(hw);
2282 /* Turn off the laser - noop with no optics */
2283 ixgbe_disable_tx_laser(hw);
2497 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
2498 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2499 IXGBE_SYSCTL_DESC_ADV_SPEED);
2284
2500
2285 /* Update the stack */
2286 adapter->link_up = FALSE;
2287 ixgbe_update_link_status(adapter);
2501#ifdef IXGBE_DEBUG
2502 /* testing sysctls (for all devices) */
2503 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
2504 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2505 "I", "PCI Power State");
2288
2506
2289 /* reprogram the RAR[0] in case user changed it. */
2290 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2507 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
2508 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2509 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2510#endif
2511 /* for X550 series devices */
2512 if (hw->mac.type >= ixgbe_mac_X550)
2513 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
2514 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2515 "I", "DMA Coalesce");
2291
2516
2292 return;
2293}
2517 /* for WoL-capable devices */
2518 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2519 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
2520 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2521 ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2294
2522
2295
2296/*********************************************************************
2297 *
2298 * Determine hardware revision.
2299 *
2300 **********************************************************************/
2301static void
2302ixgbe_identify_hardware(struct adapter *adapter)
2303{
2304 device_t dev = adapter->dev;
2305 struct ixgbe_hw *hw = &adapter->hw;
2306
2307 /* Save off the information about this board */
2308 hw->vendor_id = pci_get_vendor(dev);
2309 hw->device_id = pci_get_device(dev);
2310 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2311 hw->subsystem_vendor_id =
2312 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2313 hw->subsystem_device_id =
2314 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2315
2316 /*
2317 ** Make sure BUSMASTER is set
2318 */
2319 pci_enable_busmaster(dev);
2320
2321 /* We need this here to set the num_segs below */
2322 ixgbe_set_mac_type(hw);
2323
2324 /* Pick up the 82599 settings */
2325 if (hw->mac.type != ixgbe_mac_82598EB) {
2326 hw->phy.smart_speed = ixgbe_smart_speed;
2327 adapter->num_segs = IXGBE_82599_SCATTER;
2328 } else
2329 adapter->num_segs = IXGBE_82598_SCATTER;
2330
2331 return;
2332}
2333
2334/*********************************************************************
2335 *
2336 * Determine optic type
2337 *
2338 **********************************************************************/
2339static void
2340ixgbe_setup_optics(struct adapter *adapter)
2341{
2342 struct ixgbe_hw *hw = &adapter->hw;
2343 int layer;
2344
2345 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2346
2347 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2348 adapter->optics = IFM_10G_T;
2349 return;
2523 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
2524 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2525 "I", "Enable/Disable Wake Up Filters");
2350 }
2351
2526 }
2527
2352 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2353 adapter->optics = IFM_1000_T;
2354 return;
2355 }
2528 /* for X552/X557-AT devices */
2529 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2530 struct sysctl_oid *phy_node;
2531 struct sysctl_oid_list *phy_list;
2356
2532
2357 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2358 adapter->optics = IFM_1000_SX;
2359 return;
2360 }
2533 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
2534 CTLFLAG_RD, NULL, "External PHY sysctls");
2535 phy_list = SYSCTL_CHILDREN(phy_node);
2361
2536
2362 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2363 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2364 adapter->optics = IFM_10G_LR;
2365 return;
2366 }
2537 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
2538 CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2539 "I", "Current External PHY Temperature (Celsius)");
2367
2540
2368 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2369 adapter->optics = IFM_10G_SR;
2370 return;
2541 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
2542 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
2543 ixgbe_sysctl_phy_overtemp_occurred, "I",
2544 "External PHY High Temperature Event Occurred");
2371 }
2372
2545 }
2546
2373 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2374 adapter->optics = IFM_10G_TWINAX;
2375 return;
2547 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2548 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
2549 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2550 ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2376 }
2551 }
2552} /* ixgbe_add_device_sysctls */
2377
2553
2378 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2379 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2380 adapter->optics = IFM_10G_CX4;
2381 return;
2382 }
2383
2384 /* If we get here just set the default */
2385 adapter->optics = IFM_ETHER | IFM_AUTO;
2386 return;
2387}
2388
2389/*********************************************************************
2390 *
2391 * Setup the Legacy or MSI Interrupt handler
2392 *
2393 **********************************************************************/
2554/************************************************************************
2555 * ixgbe_allocate_pci_resources
2556 ************************************************************************/
2394static int
2557static int
2395ixgbe_allocate_legacy(struct adapter *adapter)
2558ixgbe_allocate_pci_resources(struct adapter *adapter)
2396{
2559{
2397 device_t dev = adapter->dev;
2398 struct ix_queue *que = adapter->queues;
2399#ifndef IXGBE_LEGACY_TX
2400 struct tx_ring *txr = adapter->tx_rings;
2401#endif
2402 int error, rid = 0;
2560 device_t dev = adapter->dev;
2561 int rid;
2403
2562
2404 /* MSI RID at 1 */
2405 if (adapter->msix == 1)
2406 rid = 1;
2563 rid = PCIR_BAR(0);
2564 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2565 RF_ACTIVE);
2407
2566
2408 /* We allocate a single interrupt resource */
2409 adapter->res = bus_alloc_resource_any(dev,
2410 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2411 if (adapter->res == NULL) {
2412 device_printf(dev, "Unable to allocate bus resource: "
2413 "interrupt\n");
2567 if (!(adapter->pci_mem)) {
2568 device_printf(dev, "Unable to allocate bus resource: memory\n");
2414 return (ENXIO);
2415 }
2416
2569 return (ENXIO);
2570 }
2571
2417 /*
2418 * Try allocating a fast interrupt and the associated deferred
2419 * processing contexts.
2420 */
2421#ifndef IXGBE_LEGACY_TX
2422 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2423#endif
2424 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2425 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2426 taskqueue_thread_enqueue, &que->tq);
2427 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2428 device_get_nameunit(adapter->dev));
2572 /* Save bus_space values for READ/WRITE_REG macros */
2573 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2574 adapter->osdep.mem_bus_space_handle =
2575 rman_get_bushandle(adapter->pci_mem);
2576 /* Set hw values for shared code */
2577 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2429
2578
2430 /* Tasklets for Link, SFP and Multispeed Fiber */
2431 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2432 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2433 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2434 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2435#ifdef IXGBE_FDIR
2436 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2437#endif
2438 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2439 taskqueue_thread_enqueue, &adapter->tq);
2440 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2441 device_get_nameunit(adapter->dev));
2442
2443 if ((error = bus_setup_intr(dev, adapter->res,
2444 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2445 que, &adapter->tag)) != 0) {
2446 device_printf(dev, "Failed to register fast interrupt "
2447 "handler: %d\n", error);
2448 taskqueue_free(que->tq);
2449 taskqueue_free(adapter->tq);
2450 que->tq = NULL;
2451 adapter->tq = NULL;
2452 return (error);
2453 }
2454 /* For simplicity in the handlers */
2455 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2456
2457 return (0);
2579 return (0);
2458}
2580} /* ixgbe_allocate_pci_resources */
2459
2581
2460
2461/*********************************************************************
2582/************************************************************************
2583 * ixgbe_detach - Device removal routine
2462 *
2584 *
2463 * Setup MSIX Interrupt resources and handlers
2585 * Called when the driver is being removed.
2586 * Stops the adapter and deallocates all the resources
2587 * that were allocated for driver operation.
2464 *
2588 *
2465 **********************************************************************/
2589 * return 0 on success, positive on failure
2590 ************************************************************************/
2466static int
2591static int
2467ixgbe_allocate_msix(struct adapter *adapter)
2592ixgbe_detach(device_t dev)
2468{
2593{
2469 device_t dev = adapter->dev;
2470 struct ix_queue *que = adapter->queues;
2471 struct tx_ring *txr = adapter->tx_rings;
2472 int error, rid, vector = 0;
2473 int cpu_id = 0;
2474#ifdef RSS
2475 cpuset_t cpu_mask;
2476#endif
2594 struct adapter *adapter = device_get_softc(dev);
2595 struct ix_queue *que = adapter->queues;
2596 struct tx_ring *txr = adapter->tx_rings;
2597 u32 ctrl_ext;
2477
2598
2478#ifdef RSS
2479 /*
2480 * If we're doing RSS, the number of queues needs to
2481 * match the number of RSS buckets that are configured.
2482 *
2483 * + If there's more queues than RSS buckets, we'll end
2484 * up with queues that get no traffic.
2485 *
2486 * + If there's more RSS buckets than queues, we'll end
2487 * up having multiple RSS buckets map to the same queue,
2488 * so there'll be some contention.
2489 */
2490 if (adapter->num_queues != rss_getnumbuckets()) {
2491 device_printf(dev,
2492 "%s: number of queues (%d) != number of RSS buckets (%d)"
2493 "; performance will be impacted.\n",
2494 __func__,
2495 adapter->num_queues,
2496 rss_getnumbuckets());
2599 INIT_DEBUGOUT("ixgbe_detach: begin");
2600
2601 /* Make sure VLANS are not using driver */
2602 if (adapter->ifp->if_vlantrunk != NULL) {
2603 device_printf(dev, "Vlan in use, detach first\n");
2604 return (EBUSY);
2497 }
2605 }
2498#endif
2499
2606
2500 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2501 rid = vector + 1;
2502 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2503 RF_SHAREABLE | RF_ACTIVE);
2504 if (que->res == NULL) {
2505 device_printf(dev,"Unable to allocate"
2506 " bus resource: que interrupt [%d]\n", vector);
2507 return (ENXIO);
2508 }
2509 /* Set the handler function */
2510 error = bus_setup_intr(dev, que->res,
2511 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2512 ixgbe_msix_que, que, &que->tag);
2513 if (error) {
2514 que->res = NULL;
2515 device_printf(dev, "Failed to register QUE handler");
2516 return (error);
2517 }
2518#if __FreeBSD_version >= 800504
2519 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2520#endif
2521 que->msix = vector;
2522 adapter->active_queues |= (u64)(1 << que->msix);
2523#ifdef RSS
2524 /*
2525 * The queue ID is used as the RSS layer bucket ID.
2526 * We look up the queue ID -> RSS CPU ID and select
2527 * that.
2528 */
2529 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2530#else
2531 /*
2532 * Bind the msix vector, and thus the
2533 * rings to the corresponding cpu.
2534 *
2535 * This just happens to match the default RSS round-robin
2536 * bucket -> queue -> CPU allocation.
2537 */
2538 if (adapter->num_queues > 1)
2539 cpu_id = i;
2540#endif
2541 if (adapter->num_queues > 1)
2542 bus_bind_intr(dev, que->res, cpu_id);
2543#ifdef IXGBE_DEBUG
2544#ifdef RSS
2545 device_printf(dev,
2546 "Bound RSS bucket %d to CPU %d\n",
2547 i, cpu_id);
2548#else
2549 device_printf(dev,
2550 "Bound queue %d to cpu %d\n",
2551 i, cpu_id);
2552#endif
2553#endif /* IXGBE_DEBUG */
2607 if (ixgbe_pci_iov_detach(dev) != 0) {
2608 device_printf(dev, "SR-IOV in use; detach first.\n");
2609 return (EBUSY);
2610 }
2554
2611
2612 ether_ifdetach(adapter->ifp);
2613 /* Stop the adapter */
2614 IXGBE_CORE_LOCK(adapter);
2615 ixgbe_setup_low_power_mode(adapter);
2616 IXGBE_CORE_UNLOCK(adapter);
2555
2617
2556#ifndef IXGBE_LEGACY_TX
2557 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2558#endif
2559 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2560 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2561 taskqueue_thread_enqueue, &que->tq);
2562#ifdef RSS
2563 CPU_SETOF(cpu_id, &cpu_mask);
2564 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2565 &cpu_mask,
2566 "%s (bucket %d)",
2567 device_get_nameunit(adapter->dev),
2568 cpu_id);
2569#else
2570 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2571 device_get_nameunit(adapter->dev), i);
2572#endif
2618 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2619 if (que->tq) {
2620 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2621 taskqueue_drain(que->tq, &txr->txq_task);
2622 taskqueue_drain(que->tq, &que->que_task);
2623 taskqueue_free(que->tq);
2624 }
2573 }
2574
2625 }
2626
2575 /* and Link */
2576 rid = vector + 1;
2577 adapter->res = bus_alloc_resource_any(dev,
2578 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2579 if (!adapter->res) {
2580 device_printf(dev,"Unable to allocate"
2581 " bus resource: Link interrupt [%d]\n", rid);
2582 return (ENXIO);
2627 /* Drain the Link queue */
2628 if (adapter->tq) {
2629 taskqueue_drain(adapter->tq, &adapter->link_task);
2630 taskqueue_drain(adapter->tq, &adapter->mod_task);
2631 taskqueue_drain(adapter->tq, &adapter->msf_task);
2632 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2633 taskqueue_drain(adapter->tq, &adapter->mbx_task);
2634 taskqueue_drain(adapter->tq, &adapter->phy_task);
2635 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2636 taskqueue_drain(adapter->tq, &adapter->fdir_task);
2637 taskqueue_free(adapter->tq);
2583 }
2638 }
2584 /* Set the link handler function */
2585 error = bus_setup_intr(dev, adapter->res,
2586 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2587 ixgbe_msix_link, adapter, &adapter->tag);
2588 if (error) {
2589 adapter->res = NULL;
2590 device_printf(dev, "Failed to register LINK handler");
2591 return (error);
2592 }
2593#if __FreeBSD_version >= 800504
2594 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2595#endif
2596 adapter->vector = vector;
2597 /* Tasklets for Link, SFP and Multispeed Fiber */
2598 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2599 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2600 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2601#ifdef PCI_IOV
2602 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2603#endif
2604 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2605#ifdef IXGBE_FDIR
2606 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2607#endif
2608 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2609 taskqueue_thread_enqueue, &adapter->tq);
2610 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2611 device_get_nameunit(adapter->dev));
2612
2639
2613 return (0);
2614}
2640 /* let hardware know driver is unloading */
2641 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2642 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2615
2644
2616/*
2617 * Setup Either MSI/X or MSI
2618 */
2619static int
2620ixgbe_setup_msix(struct adapter *adapter)
2621{
2622 device_t dev = adapter->dev;
2623 int rid, want, queues, msgs;
2645 /* Unregister VLAN events */
2646 if (adapter->vlan_attach != NULL)
2647 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
2648 if (adapter->vlan_detach != NULL)
2649 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2624
2650
2625 /* Override by tuneable */
2626 if (ixgbe_enable_msix == 0)
2627 goto msi;
2651 callout_drain(&adapter->timer);
2628
2652
2629 /* First try MSI/X */
2630 msgs = pci_msix_count(dev);
2631 if (msgs == 0)
2632 goto msi;
2633 rid = PCIR_BAR(MSIX_82598_BAR);
2634 adapter->msix_mem = bus_alloc_resource_any(dev,
2635 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2636 if (adapter->msix_mem == NULL) {
2637 rid += 4; /* 82599 maps in higher BAR */
2638 adapter->msix_mem = bus_alloc_resource_any(dev,
2639 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2640 }
2641 if (adapter->msix_mem == NULL) {
2642 /* May not be enabled */
2643 device_printf(adapter->dev,
2644 "Unable to map MSIX table \n");
2645 goto msi;
2646 }
2653 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
2654 netmap_detach(adapter->ifp);
2647
2655
2648 /* Figure out a reasonable auto config value */
2649 queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
2656 ixgbe_free_pci_resources(adapter);
2657 bus_generic_detach(dev);
2658 if_free(adapter->ifp);
2650
2659
2651#ifdef RSS
2652 /* If we're doing RSS, clamp at the number of RSS buckets */
2653 if (queues > rss_getnumbuckets())
2654 queues = rss_getnumbuckets();
2655#endif
2660 ixgbe_free_transmit_structures(adapter);
2661 ixgbe_free_receive_structures(adapter);
2662 free(adapter->queues, M_DEVBUF);
2663 free(adapter->mta, M_IXGBE);
2656
2664
2657 if (ixgbe_num_queues != 0)
2658 queues = ixgbe_num_queues;
2659 /* Set max queues to 8 when autoconfiguring */
2660 else if ((ixgbe_num_queues == 0) && (queues > 8))
2661 queues = 8;
2665 IXGBE_CORE_LOCK_DESTROY(adapter);
2662
2666
2663 /* reflect correct sysctl value */
2664 ixgbe_num_queues = queues;
2665
2666 /*
2667 ** Want one vector (RX/TX pair) per queue
2668 ** plus an additional for Link.
2669 */
2670 want = queues + 1;
2671 if (msgs >= want)
2672 msgs = want;
2673 else {
2674 device_printf(adapter->dev,
2675 "MSIX Configuration Problem, "
2676 "%d vectors but %d queues wanted!\n",
2677 msgs, want);
2678 goto msi;
2679 }
2680 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2681 device_printf(adapter->dev,
2682 "Using MSIX interrupts with %d vectors\n", msgs);
2683 adapter->num_queues = queues;
2684 return (msgs);
2685 }
2686 /*
2687 ** If MSIX alloc failed or provided us with
2688 ** less than needed, free and fall through to MSI
2689 */
2690 pci_release_msi(dev);
2691
2692msi:
2693 if (adapter->msix_mem != NULL) {
2694 bus_release_resource(dev, SYS_RES_MEMORY,
2695 rid, adapter->msix_mem);
2696 adapter->msix_mem = NULL;
2697 }
2698 msgs = 1;
2699 if (pci_alloc_msi(dev, &msgs) == 0) {
2700 device_printf(adapter->dev, "Using an MSI interrupt\n");
2701 return (msgs);
2702 }
2703 device_printf(adapter->dev, "Using a Legacy interrupt\n");
2704 return (0);
2667 return (0);
2705}
2668} /* ixgbe_detach */
2706
2669
2707
2670/************************************************************************
2671 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2672 *
2673 * Prepare the adapter/port for LPLU and/or WoL
2674 ************************************************************************/
2708static int
2675static int
2709ixgbe_allocate_pci_resources(struct adapter *adapter)
2676ixgbe_setup_low_power_mode(struct adapter *adapter)
2710{
2677{
2711 int rid;
2678 struct ixgbe_hw *hw = &adapter->hw;
2712 device_t dev = adapter->dev;
2679 device_t dev = adapter->dev;
2680 s32 error = 0;
2713
2681
2714 rid = PCIR_BAR(0);
2715 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2716 &rid, RF_ACTIVE);
2682 mtx_assert(&adapter->core_mtx, MA_OWNED);
2717
2683
2718 if (!(adapter->pci_mem)) {
2719 device_printf(dev, "Unable to allocate bus resource: memory\n");
2720 return (ENXIO);
2721 }
2684 /* Limit power management flow to X550EM baseT */
2685 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2686 hw->phy.ops.enter_lplu) {
2687 /* Turn off support for APM wakeup. (Using ACPI instead) */
2688 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2689 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2722
2690
2723 /* Save bus_space values for READ/WRITE_REG macros */
2724 adapter->osdep.mem_bus_space_tag =
2725 rman_get_bustag(adapter->pci_mem);
2726 adapter->osdep.mem_bus_space_handle =
2727 rman_get_bushandle(adapter->pci_mem);
2728 /* Set hw values for shared code */
2729 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2730 adapter->hw.back = adapter;
2691 /*
2692 * Clear Wake Up Status register to prevent any previous wakeup
2693 * events from waking us up immediately after we suspend.
2694 */
2695 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2731
2696
2732 /* Default to 1 queue if MSI-X setup fails */
2733 adapter->num_queues = 1;
2697 /*
2698 * Program the Wakeup Filter Control register with user filter
2699 * settings
2700 */
2701 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2734
2702
2735 /*
2736 ** Now setup MSI or MSI-X, should
2737 ** return us the number of supported
2738 ** vectors. (Will be 1 for MSI)
2739 */
2740 adapter->msix = ixgbe_setup_msix(adapter);
2741 return (0);
2742}
2703 /* Enable wakeups and power management in Wakeup Control */
2704 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2705 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2743
2706
2744static void
2745ixgbe_free_pci_resources(struct adapter * adapter)
2746{
2747 struct ix_queue *que = adapter->queues;
2748 device_t dev = adapter->dev;
2749 int rid, memrid;
2750
2751 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2752 memrid = PCIR_BAR(MSIX_82598_BAR);
2753 else
2754 memrid = PCIR_BAR(MSIX_82599_BAR);
2755
2756 /*
2757 ** There is a slight possibility of a failure mode
2758 ** in attach that will result in entering this function
2759 ** before interrupt resources have been initialized, and
2760 ** in that case we do not want to execute the loops below
2761 ** We can detect this reliably by the state of the adapter
2762 ** res pointer.
2763 */
2764 if (adapter->res == NULL)
2765 goto mem;
2766
2767 /*
2768 ** Release all msix queue resources:
2769 */
2770 for (int i = 0; i < adapter->num_queues; i++, que++) {
2771 rid = que->msix + 1;
2772 if (que->tag != NULL) {
2773 bus_teardown_intr(dev, que->res, que->tag);
2774 que->tag = NULL;
2775 }
2776 if (que->res != NULL)
2777 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2707 /* X550EM baseT adapters need a special LPLU flow */
2708 hw->phy.reset_disable = true;
2709 ixgbe_stop(adapter);
2710 error = hw->phy.ops.enter_lplu(hw);
2711 if (error)
2712 device_printf(dev, "Error entering LPLU: %d\n", error);
2713 hw->phy.reset_disable = false;
2714 } else {
2715 /* Just stop for other adapters */
2716 ixgbe_stop(adapter);
2778 }
2779
2717 }
2718
2719 return error;
2720} /* ixgbe_setup_low_power_mode */
2780
2721
2781 /* Clean the Legacy or Link interrupt last */
2782 if (adapter->vector) /* we are doing MSIX */
2783 rid = adapter->vector + 1;
2784 else
2785 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2722/************************************************************************
2723 * ixgbe_shutdown - Shutdown entry point
2724 ************************************************************************/
2725static int
2726ixgbe_shutdown(device_t dev)
2727{
2728 struct adapter *adapter = device_get_softc(dev);
2729 int error = 0;
2786
2730
2787 if (adapter->tag != NULL) {
2788 bus_teardown_intr(dev, adapter->res, adapter->tag);
2789 adapter->tag = NULL;
2790 }
2791 if (adapter->res != NULL)
2792 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2731 INIT_DEBUGOUT("ixgbe_shutdown: begin");
2793
2732
2794mem:
2795 if (adapter->msix)
2796 pci_release_msi(dev);
2733 IXGBE_CORE_LOCK(adapter);
2734 error = ixgbe_setup_low_power_mode(adapter);
2735 IXGBE_CORE_UNLOCK(adapter);
2797
2736
2798 if (adapter->msix_mem != NULL)
2799 bus_release_resource(dev, SYS_RES_MEMORY,
2800 memrid, adapter->msix_mem);
2737 return (error);
2738} /* ixgbe_shutdown */
2801
2739
2802 if (adapter->pci_mem != NULL)
2803 bus_release_resource(dev, SYS_RES_MEMORY,
2804 PCIR_BAR(0), adapter->pci_mem);
2805
2806 return;
2807}
2808
2809/*********************************************************************
2740/************************************************************************
2741 * ixgbe_suspend
2810 *
2742 *
2811 * Setup networking device structure and register an interface.
2812 *
2813 **********************************************************************/
2743 * From D0 to D3
2744 ************************************************************************/
2814static int
2745static int
2815ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2746ixgbe_suspend(device_t dev)
2816{
2747{
2817 struct ifnet *ifp;
2748 struct adapter *adapter = device_get_softc(dev);
2749 int error = 0;
2818
2750
2819 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2751 INIT_DEBUGOUT("ixgbe_suspend: begin");
2820
2752
2821 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2822 if (ifp == NULL) {
2823 device_printf(dev, "can not allocate ifnet structure\n");
2824 return (-1);
2825 }
2826 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2827 ifp->if_baudrate = IF_Gbps(10);
2828 ifp->if_init = ixgbe_init;
2829 ifp->if_softc = adapter;
2830 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2831 ifp->if_ioctl = ixgbe_ioctl;
2832#if __FreeBSD_version >= 1100036
2833 if_setgetcounterfn(ifp, ixgbe_get_counter);
2834#endif
2835#if __FreeBSD_version >= 1100045
2836 /* TSO parameters */
2837 ifp->if_hw_tsomax = 65518;
2838 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2839 ifp->if_hw_tsomaxsegsize = 2048;
2840#endif
2841#ifndef IXGBE_LEGACY_TX
2842 ifp->if_transmit = ixgbe_mq_start;
2843 ifp->if_qflush = ixgbe_qflush;
2844#else
2845 ifp->if_start = ixgbe_start;
2846 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2847 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2848 IFQ_SET_READY(&ifp->if_snd);
2849#endif
2753 IXGBE_CORE_LOCK(adapter);
2850
2754
2851 ether_ifattach(ifp, adapter->hw.mac.addr);
2755 error = ixgbe_setup_low_power_mode(adapter);
2852
2756
2853 adapter->max_frame_size =
2854 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2757 IXGBE_CORE_UNLOCK(adapter);
2855
2758
2856 /*
2857 * Tell the upper layer(s) we support long frames.
2858 */
2859 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2759 return (error);
2760} /* ixgbe_suspend */
2860
2761
2861 /* Set capability flags */
2862 ifp->if_capabilities |= IFCAP_RXCSUM
2863 | IFCAP_TXCSUM
2864 | IFCAP_RXCSUM_IPV6
2865 | IFCAP_TXCSUM_IPV6
2866 | IFCAP_TSO4
2867 | IFCAP_TSO6
2868 | IFCAP_LRO
2869 | IFCAP_VLAN_HWTAGGING
2870 | IFCAP_VLAN_HWTSO
2871 | IFCAP_VLAN_HWCSUM
2872 | IFCAP_JUMBO_MTU
2873 | IFCAP_VLAN_MTU
2874 | IFCAP_HWSTATS;
2762/************************************************************************
2763 * ixgbe_resume
2764 *
2765 * From D3 to D0
2766 ************************************************************************/
2767static int
2768ixgbe_resume(device_t dev)
2769{
2770 struct adapter *adapter = device_get_softc(dev);
2771 struct ifnet *ifp = adapter->ifp;
2772 struct ixgbe_hw *hw = &adapter->hw;
2773 u32 wus;
2875
2774
2876 /* Enable the above capabilities by default */
2877 ifp->if_capenable = ifp->if_capabilities;
2775 INIT_DEBUGOUT("ixgbe_resume: begin");
2878
2776
2879 /*
2880 ** Don't turn this on by default, if vlans are
2881 ** created on another pseudo device (eg. lagg)
2882 ** then vlan events are not passed thru, breaking
2883 ** operation, but with HW FILTER off it works. If
2884 ** using vlans directly on the ixgbe driver you can
2885 ** enable this and get full hardware tag filtering.
2886 */
2887 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2777 IXGBE_CORE_LOCK(adapter);
2888
2778
2779 /* Read & clear WUS register */
2780 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2781 if (wus)
2782 device_printf(dev, "Woken up by (WUS): %#010x\n",
2783 IXGBE_READ_REG(hw, IXGBE_WUS));
2784 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2785 /* And clear WUFC until next low-power transition */
2786 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2787
2889 /*
2788 /*
2890 * Specify the media types supported by this adapter and register
2891 * callbacks to update media and link information
2789 * Required after D3->D0 transition;
2790 * will re-advertise all previous advertised speeds
2892 */
2791 */
2893 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2894 ixgbe_media_status);
2792 if (ifp->if_flags & IFF_UP)
2793 ixgbe_init_locked(adapter);
2895
2794
2896 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
2897 ixgbe_add_media_types(adapter);
2795 IXGBE_CORE_UNLOCK(adapter);
2898
2796
2899 /* Set autoselect media by default */
2900 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2901
2902 return (0);
2797 return (0);
2903}
2798} /* ixgbe_resume */
2904
2799
2800/************************************************************************
2801 * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2802 *
2803 * Takes the ifnet's if_capenable flags (e.g. set by the user using
2804 * ifconfig) and indicates to the OS via the ifnet's if_hwassist
2805 * field what mbuf offload flags the driver will understand.
2806 ************************************************************************/
2905static void
2807static void
2906ixgbe_add_media_types(struct adapter *adapter)
2808ixgbe_set_if_hwassist(struct adapter *adapter)
2907{
2809{
2908 struct ixgbe_hw *hw = &adapter->hw;
2909 device_t dev = adapter->dev;
2910 int layer;
2810 struct ifnet *ifp = adapter->ifp;
2911
2811
2912 layer = adapter->phy_layer;
2913
2914 /* Media types with matching FreeBSD media defines */
2915 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2916 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2917 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2918 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2919 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2920 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2921
2922 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2923 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2924 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2925
2926 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
2927 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2928 if (hw->phy.multispeed_fiber)
2929 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2812 ifp->if_hwassist = 0;
2813#if __FreeBSD_version >= 1000000
2814 if (ifp->if_capenable & IFCAP_TSO4)
2815 ifp->if_hwassist |= CSUM_IP_TSO;
2816 if (ifp->if_capenable & IFCAP_TSO6)
2817 ifp->if_hwassist |= CSUM_IP6_TSO;
2818 if (ifp->if_capenable & IFCAP_TXCSUM) {
2819 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2820 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2821 ifp->if_hwassist |= CSUM_IP_SCTP;
2930 }
2822 }
2931 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2932 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2933 if (hw->phy.multispeed_fiber)
2934 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2935 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2936 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2937 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2938 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2939
2940#ifdef IFM_ETH_XTYPE
2941 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2942 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2943 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
2944 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2945 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2946 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2823 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2824 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2825 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2826 ifp->if_hwassist |= CSUM_IP6_SCTP;
2827 }
2947#else
2828#else
2948 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2949 device_printf(dev, "Media supported: 10GbaseKR\n");
2950 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2951 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2829 if (ifp->if_capenable & IFCAP_TSO)
2830 ifp->if_hwassist |= CSUM_TSO;
2831 if (ifp->if_capenable & IFCAP_TXCSUM) {
2832 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2833 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2834 ifp->if_hwassist |= CSUM_SCTP;
2952 }
2835 }
2953 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2954 device_printf(dev, "Media supported: 10GbaseKX4\n");
2955 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2956 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2957 }
2958 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2959 device_printf(dev, "Media supported: 1000baseKX\n");
2960 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2961 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2962 }
2963#endif
2836#endif
2964 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
2965 device_printf(dev, "Media supported: 1000baseBX\n");
2966
2967 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2968 ifmedia_add(&adapter->media,
2969 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2970 ifmedia_add(&adapter->media,
2971 IFM_ETHER | IFM_1000_T, 0, NULL);
2972 }
2837} /* ixgbe_set_if_hwassist */
2973
2838
2974 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2975}
2976
2977static void
2978ixgbe_config_link(struct adapter *adapter)
2839/************************************************************************
2840 * ixgbe_init_locked - Init entry point
2841 *
2842 * Used in two ways: It is used by the stack as an init
2843 * entry point in network interface structure. It is also
2844 * used by the driver as a hw/sw initialization routine to
2845 * get to a consistent state.
2846 *
2847 * return 0 on success, positive on failure
2848 ************************************************************************/
2849void
2850ixgbe_init_locked(struct adapter *adapter)
2979{
2851{
2852 struct ifnet *ifp = adapter->ifp;
2853 device_t dev = adapter->dev;
2980 struct ixgbe_hw *hw = &adapter->hw;
2854 struct ixgbe_hw *hw = &adapter->hw;
2981 u32 autoneg, err = 0;
2982 bool sfp, negotiate;
2855 struct tx_ring *txr;
2856 struct rx_ring *rxr;
2857 u32 txdctl, mhadd;
2858 u32 rxdctl, rxctrl;
2859 u32 ctrl_ext;
2860 int err = 0;
2983
2861
2984 sfp = ixgbe_is_sfp(hw);
2862 mtx_assert(&adapter->core_mtx, MA_OWNED);
2863 INIT_DEBUGOUT("ixgbe_init_locked: begin");
2985
2864
2986 if (sfp) {
2987 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2988 } else {
2989 if (hw->mac.ops.check_link)
2990 err = ixgbe_check_link(hw, &adapter->link_speed,
2991 &adapter->link_up, FALSE);
2992 if (err)
2993 goto out;
2994 autoneg = hw->phy.autoneg_advertised;
2995 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2996 err = hw->mac.ops.get_link_capabilities(hw,
2997 &autoneg, &negotiate);
2998 if (err)
2999 goto out;
3000 if (hw->mac.ops.setup_link)
3001 err = hw->mac.ops.setup_link(hw,
3002 autoneg, adapter->link_up);
3003 }
3004out:
3005 return;
3006}
2865 hw->adapter_stopped = FALSE;
2866 ixgbe_stop_adapter(hw);
2867 callout_stop(&adapter->timer);
3007
2868
2869 /* Queue indices may change with IOV mode */
2870 ixgbe_align_all_queue_indices(adapter);
3008
2871
3009/*********************************************************************
3010 *
3011 * Enable transmit units.
3012 *
3013 **********************************************************************/
3014static void
3015ixgbe_initialize_transmit_units(struct adapter *adapter)
3016{
3017 struct tx_ring *txr = adapter->tx_rings;
3018 struct ixgbe_hw *hw = &adapter->hw;
2872 /* reprogram the RAR[0] in case user changed it. */
2873 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3019
2874
3020 /* Setup the Base and Length of the Tx Descriptor Ring */
3021 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3022 u64 tdba = txr->txdma.dma_paddr;
3023 u32 txctrl = 0;
3024 int j = txr->me;
2875 /* Get the latest mac address, User can use a LAA */
2876 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2877 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2878 hw->addr_ctrl.rar_used_count = 1;
3025
2879
3026 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3027 (tdba & 0x00000000ffffffffULL));
3028 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3029 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3030 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2880 /* Set hardware offload abilities from ifnet flags */
2881 ixgbe_set_if_hwassist(adapter);
3031
2882
3032 /* Setup the HW Tx Head and Tail descriptor pointers */
3033 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3034 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
2883 /* Prepare transmit descriptors and buffers */
2884 if (ixgbe_setup_transmit_structures(adapter)) {
2885 device_printf(dev, "Could not setup transmit structures\n");
2886 ixgbe_stop(adapter);
2887 return;
2888 }
3035
2889
3036 /* Cache the tail address */
3037 txr->tail = IXGBE_TDT(j);
2890 ixgbe_init_hw(hw);
2891 ixgbe_initialize_iov(adapter);
2892 ixgbe_initialize_transmit_units(adapter);
3038
2893
3039 /* Disable Head Writeback */
3040 /*
3041 * Note: for X550 series devices, these registers are actually
3042 * prefixed with TPH_ isntead of DCA_, but the addresses and
3043 * fields remain the same.
3044 */
3045 switch (hw->mac.type) {
3046 case ixgbe_mac_82598EB:
3047 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3048 break;
3049 default:
3050 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3051 break;
3052 }
3053 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3054 switch (hw->mac.type) {
3055 case ixgbe_mac_82598EB:
3056 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3057 break;
3058 default:
3059 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3060 break;
3061 }
2894 /* Setup Multicast table */
2895 ixgbe_set_multi(adapter);
3062
2896
3063 }
2897 /* Determine the correct mbuf pool, based on frame size */
2898 if (adapter->max_frame_size <= MCLBYTES)
2899 adapter->rx_mbuf_sz = MCLBYTES;
2900 else
2901 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3064
2902
3065 if (hw->mac.type != ixgbe_mac_82598EB) {
3066 u32 dmatxctl, rttdcs;
3067#ifdef PCI_IOV
3068 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
3069#endif
3070 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3071 dmatxctl |= IXGBE_DMATXCTL_TE;
3072 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3073 /* Disable arbiter to set MTQC */
3074 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3075 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3076 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3077#ifdef PCI_IOV
3078 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3079#else
3080 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
3081#endif
3082 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3083 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2903 /* Prepare receive descriptors and buffers */
2904 if (ixgbe_setup_receive_structures(adapter)) {
2905 device_printf(dev, "Could not setup receive structures\n");
2906 ixgbe_stop(adapter);
2907 return;
3084 }
3085
2908 }
2909
3086 return;
3087}
2910 /* Configure RX settings */
2911 ixgbe_initialize_receive_units(adapter);
3088
2912
3089static void
3090ixgbe_initialize_rss_mapping(struct adapter *adapter)
3091{
3092 struct ixgbe_hw *hw = &adapter->hw;
3093 u32 reta = 0, mrqc, rss_key[10];
3094 int queue_id, table_size, index_mult;
3095#ifdef RSS
3096 u32 rss_hash_config;
3097#endif
3098#ifdef PCI_IOV
3099 enum ixgbe_iov_mode mode;
3100#endif
2913 /* Enable SDP & MSI-X interrupts based on adapter */
2914 ixgbe_config_gpie(adapter);
3101
2915
3102#ifdef RSS
3103 /* Fetch the configured RSS key */
3104 rss_getkey((uint8_t *) &rss_key);
3105#else
3106 /* set up random bits */
3107 arc4rand(&rss_key, sizeof(rss_key), 0);
3108#endif
3109
3110 /* Set multiplier for RETA setup and table size based on MAC */
3111 index_mult = 0x1;
3112 table_size = 128;
3113 switch (adapter->hw.mac.type) {
3114 case ixgbe_mac_82598EB:
3115 index_mult = 0x11;
3116 break;
3117 case ixgbe_mac_X550:
3118 case ixgbe_mac_X550EM_x:
3119 table_size = 512;
3120 break;
3121 default:
3122 break;
2916 /* Set MTU size */
2917 if (ifp->if_mtu > ETHERMTU) {
2918 /* aka IXGBE_MAXFRS on 82599 and newer */
2919 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2920 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2921 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2922 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3123 }
3124
2923 }
2924
3125 /* Set up the redirection table */
3126 for (int i = 0, j = 0; i < table_size; i++, j++) {
3127 if (j == adapter->num_queues) j = 0;
3128#ifdef RSS
2925 /* Now enable all the queues */
2926 for (int i = 0; i < adapter->num_queues; i++) {
2927 txr = &adapter->tx_rings[i];
2928 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2929 txdctl |= IXGBE_TXDCTL_ENABLE;
2930 /* Set WTHRESH to 8, burst writeback */
2931 txdctl |= (8 << 16);
3129 /*
2932 /*
3130 * Fetch the RSS bucket id for the given indirection entry.
3131 * Cap it at the number of configured buckets (which is
3132 * num_queues.)
2933 * When the internal queue falls below PTHRESH (32),
2934 * start prefetching as long as there are at least
2935 * HTHRESH (1) buffers ready. The values are taken
2936 * from the Intel linux driver 3.8.21.
2937 * Prefetching enables tx line rate even with 1 queue.
3133 */
2938 */
3134 queue_id = rss_get_indirection_to_bucket(i);
3135 queue_id = queue_id % adapter->num_queues;
3136#else
3137 queue_id = (j * index_mult);
3138#endif
3139 /*
3140 * The low 8 bits are for hash value (n+0);
3141 * The next 8 bits are for hash value (n+1), etc.
3142 */
3143 reta = reta >> 8;
3144 reta = reta | ( ((uint32_t) queue_id) << 24);
3145 if ((i & 3) == 3) {
3146 if (i < 128)
3147 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2939 txdctl |= (32 << 0) | (1 << 8);
2940 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2941 }
2942
2943 for (int i = 0, j = 0; i < adapter->num_queues; i++) {
2944 rxr = &adapter->rx_rings[i];
2945 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2946 if (hw->mac.type == ixgbe_mac_82598EB) {
2947 /*
2948 * PTHRESH = 21
2949 * HTHRESH = 4
2950 * WTHRESH = 8
2951 */
2952 rxdctl &= ~0x3FFFFF;
2953 rxdctl |= 0x080420;
2954 }
2955 rxdctl |= IXGBE_RXDCTL_ENABLE;
2956 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2957 for (; j < 10; j++) {
2958 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2959 IXGBE_RXDCTL_ENABLE)
2960 break;
3148 else
2961 else
3149 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
3150 reta = 0;
2962 msec_delay(1);
3151 }
2963 }
3152 }
2964 wmb();
3153
2965
3154 /* Now fill our hash function seeds */
3155 for (int i = 0; i < 10; i++)
3156 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
3157
3158 /* Perform hash on these packet types */
3159#ifdef RSS
3160 mrqc = IXGBE_MRQC_RSSEN;
3161 rss_hash_config = rss_gethashconfig();
3162 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3163 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3164 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3165 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3166 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3167 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3168 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3169 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3170 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3171 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3172 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
3173 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3174 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3175 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3176 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
3177 device_printf(adapter->dev,
3178 "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
3179 "but not supported\n", __func__);
3180 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3181 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3182 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
3183 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3184#else
3185 /*
3186 * Disable UDP - IP fragments aren't currently being handled
3187 * and so we end up with a mix of 2-tuple and 4-tuple
3188 * traffic.
3189 */
3190 mrqc = IXGBE_MRQC_RSSEN
3191 | IXGBE_MRQC_RSS_FIELD_IPV4
3192 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3193 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3194 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3195 | IXGBE_MRQC_RSS_FIELD_IPV6
3196 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3197 ;
3198#endif /* RSS */
3199#ifdef PCI_IOV
3200 mode = ixgbe_get_iov_mode(adapter);
3201 mrqc |= ixgbe_get_mrqc(mode);
3202#endif
3203 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3204}
3205
3206
3207/*********************************************************************
3208 *
3209 * Setup receive registers and features.
3210 *
3211 **********************************************************************/
3212#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3213
3214#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3215
3216static void
3217ixgbe_initialize_receive_units(struct adapter *adapter)
3218{
3219 struct rx_ring *rxr = adapter->rx_rings;
3220 struct ixgbe_hw *hw = &adapter->hw;
3221 struct ifnet *ifp = adapter->ifp;
3222 u32 bufsz, fctrl, srrctl, rxcsum;
3223 u32 hlreg;
3224
3225 /*
3226 * Make sure receives are disabled while
3227 * setting up the descriptor ring
3228 */
3229 ixgbe_disable_rx(hw);
3230
3231 /* Enable broadcasts */
3232 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3233 fctrl |= IXGBE_FCTRL_BAM;
3234 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3235 fctrl |= IXGBE_FCTRL_DPF;
3236 fctrl |= IXGBE_FCTRL_PMCF;
3237 }
3238 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3239
3240 /* Set for Jumbo Frames? */
3241 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3242 if (ifp->if_mtu > ETHERMTU)
3243 hlreg |= IXGBE_HLREG0_JUMBOEN;
3244 else
3245 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3246#ifdef DEV_NETMAP
3247 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3248 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3249 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3250 else
3251 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3252#endif /* DEV_NETMAP */
3253 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3254
3255 bufsz = (adapter->rx_mbuf_sz +
3256 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3257
3258 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3259 u64 rdba = rxr->rxdma.dma_paddr;
3260 int j = rxr->me;
3261
3262 /* Setup the Base and Length of the Rx Descriptor Ring */
3263 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3264 (rdba & 0x00000000ffffffffULL));
3265 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3266 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3267 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3268
3269 /* Set up the SRRCTL register */
3270 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3271 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3272 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3273 srrctl |= bufsz;
3274 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3275
3276 /*
2966 /*
3277 * Set DROP_EN iff we have no flow control and >1 queue.
3278 * Note that srrctl was cleared shortly before during reset,
3279 * so we do not need to clear the bit, but do it just in case
3280 * this code is moved elsewhere.
2967 * In netmap mode, we must preserve the buffers made
2968 * available to userspace before the if_init()
2969 * (this is true by default on the TX side, because
2970 * init makes all buffers available to userspace).
2971 *
2972 * netmap_reset() and the device specific routines
2973 * (e.g. ixgbe_setup_receive_rings()) map these
2974 * buffers at the end of the NIC ring, so here we
2975 * must set the RDT (tail) register to make sure
2976 * they are not overwritten.
2977 *
2978 * In this driver the NIC ring starts at RDH = 0,
2979 * RDT points to the last slot available for reception (?),
2980 * so RDT = num_rx_desc - 1 means the whole ring is available.
3281 */
2981 */
3282 if (adapter->num_queues > 1 &&
3283 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3284 srrctl |= IXGBE_SRRCTL_DROP_EN;
3285 } else {
3286 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3287 }
2982#ifdef DEV_NETMAP
2983 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
2984 (ifp->if_capenable & IFCAP_NETMAP)) {
2985 struct netmap_adapter *na = NA(adapter->ifp);
2986 struct netmap_kring *kring = &na->rx_rings[i];
2987 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3288
2988
3289 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3290
3291 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3292 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3293 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3294
3295 /* Set the driver rx tail address */
3296 rxr->tail = IXGBE_RDT(rxr->me);
2989 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
2990 } else
2991#endif /* DEV_NETMAP */
2992 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
2993 adapter->num_rx_desc - 1);
3297 }
3298
2994 }
2995
3299 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3300 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3301 IXGBE_PSRTYPE_UDPHDR |
3302 IXGBE_PSRTYPE_IPV4HDR |
3303 IXGBE_PSRTYPE_IPV6HDR;
3304 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3305 }
2996 /* Enable Receive engine */
2997 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2998 if (hw->mac.type == ixgbe_mac_82598EB)
2999 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3000 rxctrl |= IXGBE_RXCTRL_RXEN;
3001 ixgbe_enable_rx_dma(hw, rxctrl);
3306
3002
3307 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3003 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3308
3004
3309 ixgbe_initialize_rss_mapping(adapter);
3310
3311 if (adapter->num_queues > 1) {
3312 /* RSS and RX IPP Checksum are mutually exclusive */
3313 rxcsum |= IXGBE_RXCSUM_PCSD;
3005 /* Set up MSI-X routing */
3006 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3007 ixgbe_configure_ivars(adapter);
3008 /* Set up auto-mask */
3009 if (hw->mac.type == ixgbe_mac_82598EB)
3010 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3011 else {
3012 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3013 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3014 }
3015 } else { /* Simple settings for Legacy/MSI */
3016 ixgbe_set_ivar(adapter, 0, 0, 0);
3017 ixgbe_set_ivar(adapter, 0, 0, 1);
3018 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3314 }
3315
3019 }
3020
3316 if (ifp->if_capenable & IFCAP_RXCSUM)
3317 rxcsum |= IXGBE_RXCSUM_PCSD;
3021 ixgbe_init_fdir(adapter);
3318
3022
3319 /* This is useful for calculating UDP/IP fragment checksums */
3320 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3321 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3322
3323 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3324
3325 return;
3326}
3327
3328
3329/*
3330** This routine is run via an vlan config EVENT,
3331** it enables us to use the HW Filter table since
3332** we can get the vlan id. This just creates the
3333** entry in the soft version of the VFTA, init will
3334** repopulate the real table.
3335*/
3336static void
3337ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3338{
3339 struct adapter *adapter = ifp->if_softc;
3340 u16 index, bit;
3341
3342 if (ifp->if_softc != arg) /* Not our event */
3343 return;
3344
3345 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3346 return;
3347
3348 IXGBE_CORE_LOCK(adapter);
3349 index = (vtag >> 5) & 0x7F;
3350 bit = vtag & 0x1F;
3351 adapter->shadow_vfta[index] |= (1 << bit);
3352 ++adapter->num_vlans;
3353 ixgbe_setup_vlan_hw_support(adapter);
3354 IXGBE_CORE_UNLOCK(adapter);
3355}
3356
3357/*
3358** This routine is run via an vlan
3359** unconfig EVENT, remove our entry
3360** in the soft vfta.
3361*/
3362static void
3363ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3364{
3365 struct adapter *adapter = ifp->if_softc;
3366 u16 index, bit;
3367
3368 if (ifp->if_softc != arg)
3369 return;
3370
3371 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3372 return;
3373
3374 IXGBE_CORE_LOCK(adapter);
3375 index = (vtag >> 5) & 0x7F;
3376 bit = vtag & 0x1F;
3377 adapter->shadow_vfta[index] &= ~(1 << bit);
3378 --adapter->num_vlans;
3379 /* Re-init to load the changes */
3380 ixgbe_setup_vlan_hw_support(adapter);
3381 IXGBE_CORE_UNLOCK(adapter);
3382}
3383
3384static void
3385ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3386{
3387 struct ifnet *ifp = adapter->ifp;
3388 struct ixgbe_hw *hw = &adapter->hw;
3389 struct rx_ring *rxr;
3390 u32 ctrl;
3391
3392
3393 /*
3023 /*
3394 ** We get here thru init_locked, meaning
3395 ** a soft reset, this has already cleared
3396 ** the VFTA and other state, so if there
3397 ** have been no vlan's registered do nothing.
3398 */
3399 if (adapter->num_vlans == 0)
3400 return;
3401
3402 /* Setup the queues for vlans */
3403 for (int i = 0; i < adapter->num_queues; i++) {
3404 rxr = &adapter->rx_rings[i];
3405 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3406 if (hw->mac.type != ixgbe_mac_82598EB) {
3407 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3408 ctrl |= IXGBE_RXDCTL_VME;
3409 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3024 * Check on any SFP devices that
3025 * need to be kick-started
3026 */
3027 if (hw->phy.type == ixgbe_phy_none) {
3028 err = hw->phy.ops.identify(hw);
3029 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3030 device_printf(dev,
3031 "Unsupported SFP+ module type was detected.\n");
3032 return;
3410 }
3033 }
3411 rxr->vtag_strip = TRUE;
3412 }
3413
3034 }
3035
3414 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3415 return;
3416 /*
3417 ** A soft reset zero's out the VFTA, so
3418 ** we need to repopulate it now.
3419 */
3420 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3421 if (adapter->shadow_vfta[i] != 0)
3422 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3423 adapter->shadow_vfta[i]);
3036 /* Set moderation on the Link interrupt */
3037 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3424
3038
3425 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3426 /* Enable the Filter Table if enabled */
3427 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3428 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3429 ctrl |= IXGBE_VLNCTRL_VFE;
3430 }
3431 if (hw->mac.type == ixgbe_mac_82598EB)
3432 ctrl |= IXGBE_VLNCTRL_VME;
3433 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3434}
3039 /* Config/Enable Link */
3040 ixgbe_config_link(adapter);
3435
3041
3436static void
3437ixgbe_enable_intr(struct adapter *adapter)
3438{
3439 struct ixgbe_hw *hw = &adapter->hw;
3440 struct ix_queue *que = adapter->queues;
3441 u32 mask, fwsm;
3042 /* Hardware Packet Buffer & Flow Control setup */
3043 ixgbe_config_delay_values(adapter);
3442
3044
3443 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3444 /* Enable Fan Failure detection */
3445 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3446 mask |= IXGBE_EIMS_GPI_SDP1;
3045 /* Initialize the FC settings */
3046 ixgbe_start_hw(hw);
3447
3047
3448 switch (adapter->hw.mac.type) {
3449 case ixgbe_mac_82599EB:
3450 mask |= IXGBE_EIMS_ECC;
3451 /* Temperature sensor on some adapters */
3452 mask |= IXGBE_EIMS_GPI_SDP0;
3453 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3454 mask |= IXGBE_EIMS_GPI_SDP1;
3455 mask |= IXGBE_EIMS_GPI_SDP2;
3456#ifdef IXGBE_FDIR
3457 mask |= IXGBE_EIMS_FLOW_DIR;
3458#endif
3459#ifdef PCI_IOV
3460 mask |= IXGBE_EIMS_MAILBOX;
3461#endif
3462 break;
3463 case ixgbe_mac_X540:
3464 /* Detect if Thermal Sensor is enabled */
3465 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3466 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3467 mask |= IXGBE_EIMS_TS;
3468 mask |= IXGBE_EIMS_ECC;
3469#ifdef IXGBE_FDIR
3470 mask |= IXGBE_EIMS_FLOW_DIR;
3471#endif
3472 break;
3473 case ixgbe_mac_X550:
3474 case ixgbe_mac_X550EM_x:
3475 /* MAC thermal sensor is automatically enabled */
3476 mask |= IXGBE_EIMS_TS;
3477 /* Some devices use SDP0 for important information */
3478 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3479 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3480 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3481 mask |= IXGBE_EIMS_ECC;
3482#ifdef IXGBE_FDIR
3483 mask |= IXGBE_EIMS_FLOW_DIR;
3484#endif
3485#ifdef PCI_IOV
3486 mask |= IXGBE_EIMS_MAILBOX;
3487#endif
3488 /* falls through */
3489 default:
3490 break;
3491 }
3048 /* Set up VLAN support and filter */
3049 ixgbe_setup_vlan_hw_support(adapter);
3492
3050
3493 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3051 /* Setup DMA Coalescing */
3052 ixgbe_config_dmac(adapter);
3494
3053
3495 /* With MSI-X we use auto clear */
3496 if (adapter->msix_mem) {
3497 mask = IXGBE_EIMS_ENABLE_MASK;
3498 /* Don't autoclear Link */
3499 mask &= ~IXGBE_EIMS_OTHER;
3500 mask &= ~IXGBE_EIMS_LSC;
3501#ifdef PCI_IOV
3502 mask &= ~IXGBE_EIMS_MAILBOX;
3503#endif
3504 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3054 /* And now turn on interrupts */
3055 ixgbe_enable_intr(adapter);
3056
3057 /* Enable the use of the MBX by the VF's */
3058 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3059 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3060 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3061 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3505 }
3506
3062 }
3063
3507 /*
3508 ** Now enable all queues, this is done separately to
3509 ** allow for handling the extended (beyond 32) MSIX
3510 ** vectors that can be used by 82599
3511 */
3512 for (int i = 0; i < adapter->num_queues; i++, que++)
3513 ixgbe_enable_queue(adapter, que->msix);
3064 /* Now inform the stack we're ready */
3065 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3514
3066
3515 IXGBE_WRITE_FLUSH(hw);
3516
3517 return;
3067 return;
3518}
3068} /* ixgbe_init_locked */
3519
3069
3070/************************************************************************
3071 * ixgbe_init
3072 ************************************************************************/
3520static void
3073static void
3521ixgbe_disable_intr(struct adapter *adapter)
3074ixgbe_init(void *arg)
3522{
3075{
3523 if (adapter->msix_mem)
3524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3525 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3526 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3527 } else {
3528 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3529 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3531 }
3532 IXGBE_WRITE_FLUSH(&adapter->hw);
3533 return;
3534}
3076 struct adapter *adapter = arg;
3535
3077
3536/*
3537** Get the width and transaction speed of
3538** the slot this adapter is plugged into.
3539*/
3540static void
3541ixgbe_get_slot_info(struct adapter *adapter)
3542{
3543 device_t dev = adapter->dev;
3544 struct ixgbe_hw *hw = &adapter->hw;
3545 struct ixgbe_mac_info *mac = &hw->mac;
3546 u16 link;
3547 u32 offset;
3078 IXGBE_CORE_LOCK(adapter);
3079 ixgbe_init_locked(adapter);
3080 IXGBE_CORE_UNLOCK(adapter);
3548
3081
3549 /* For most devices simply call the shared code routine */
3550 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3551 ixgbe_get_bus_info(hw);
3552 /* These devices don't use PCI-E */
3553 switch (hw->mac.type) {
3554 case ixgbe_mac_X550EM_x:
3555 return;
3556 default:
3557 goto display;
3558 }
3559 }
3560
3561 /*
3562 ** For the Quad port adapter we need to parse back
3563 ** up the PCI tree to find the speed of the expansion
3564 ** slot into which this adapter is plugged. A bit more work.
3565 */
3566 dev = device_get_parent(device_get_parent(dev));
3567#ifdef IXGBE_DEBUG
3568 device_printf(dev, "parent pcib = %x,%x,%x\n",
3569 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3570#endif
3571 dev = device_get_parent(device_get_parent(dev));
3572#ifdef IXGBE_DEBUG
3573 device_printf(dev, "slot pcib = %x,%x,%x\n",
3574 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3575#endif
3576 /* Now get the PCI Express Capabilities offset */
3577 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3578 /* ...and read the Link Status Register */
3579 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3580 switch (link & IXGBE_PCI_LINK_WIDTH) {
3581 case IXGBE_PCI_LINK_WIDTH_1:
3582 hw->bus.width = ixgbe_bus_width_pcie_x1;
3583 break;
3584 case IXGBE_PCI_LINK_WIDTH_2:
3585 hw->bus.width = ixgbe_bus_width_pcie_x2;
3586 break;
3587 case IXGBE_PCI_LINK_WIDTH_4:
3588 hw->bus.width = ixgbe_bus_width_pcie_x4;
3589 break;
3590 case IXGBE_PCI_LINK_WIDTH_8:
3591 hw->bus.width = ixgbe_bus_width_pcie_x8;
3592 break;
3593 default:
3594 hw->bus.width = ixgbe_bus_width_unknown;
3595 break;
3596 }
3597
3598 switch (link & IXGBE_PCI_LINK_SPEED) {
3599 case IXGBE_PCI_LINK_SPEED_2500:
3600 hw->bus.speed = ixgbe_bus_speed_2500;
3601 break;
3602 case IXGBE_PCI_LINK_SPEED_5000:
3603 hw->bus.speed = ixgbe_bus_speed_5000;
3604 break;
3605 case IXGBE_PCI_LINK_SPEED_8000:
3606 hw->bus.speed = ixgbe_bus_speed_8000;
3607 break;
3608 default:
3609 hw->bus.speed = ixgbe_bus_speed_unknown;
3610 break;
3611 }
3612
3613 mac->ops.set_lan_id(hw);
3614
3615display:
3616 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3617 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3618 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3619 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3620 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3621 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3622 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3623 ("Unknown"));
3624
3625 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3626 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3627 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3628 device_printf(dev, "PCI-Express bandwidth available"
3629 " for this card\n is not sufficient for"
3630 " optimal performance.\n");
3631 device_printf(dev, "For optimal performance a x8 "
3632 "PCIE, or x4 PCIE Gen2 slot is required.\n");
3633 }
3634 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3635 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3636 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3637 device_printf(dev, "PCI-Express bandwidth available"
3638 " for this card\n is not sufficient for"
3639 " optimal performance.\n");
3640 device_printf(dev, "For optimal performance a x8 "
3641 "PCIE Gen3 slot is required.\n");
3642 }
3643
3644 return;
3082 return;
3645}
3083} /* ixgbe_init */
3646
3084
3647
3648/*
3649** Setup the correct IVAR register for a particular MSIX interrupt
3650** (yes this is all very magic and confusing :)
3651** - entry is the register array entry
3652** - vector is the MSIX vector for this queue
3653** - type is RX/TX/MISC
3654*/
3085/************************************************************************
3086 * ixgbe_set_ivar
3087 *
3088 * Setup the correct IVAR register for a particular MSI-X interrupt
3089 * (yes this is all very magic and confusing :)
3090 * - entry is the register array entry
3091 * - vector is the MSI-X vector for this queue
3092 * - type is RX/TX/MISC
3093 ************************************************************************/
3655static void
3656ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3657{
3658 struct ixgbe_hw *hw = &adapter->hw;
3659 u32 ivar, index;
3660
3661 vector |= IXGBE_IVAR_ALLOC_VAL;
3662

--- 10 unchanged lines hidden (view full) ---

3673 ivar |= (vector << (8 * (entry & 0x3)));
3674 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3675 break;
3676
3677 case ixgbe_mac_82599EB:
3678 case ixgbe_mac_X540:
3679 case ixgbe_mac_X550:
3680 case ixgbe_mac_X550EM_x:
3094static void
3095ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3096{
3097 struct ixgbe_hw *hw = &adapter->hw;
3098 u32 ivar, index;
3099
3100 vector |= IXGBE_IVAR_ALLOC_VAL;
3101

--- 10 unchanged lines hidden (view full) ---

3112 ivar |= (vector << (8 * (entry & 0x3)));
3113 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3114 break;
3115
3116 case ixgbe_mac_82599EB:
3117 case ixgbe_mac_X540:
3118 case ixgbe_mac_X550:
3119 case ixgbe_mac_X550EM_x:
3120 case ixgbe_mac_X550EM_a:
3681 if (type == -1) { /* MISC IVAR */
3682 index = (entry & 1) * 8;
3683 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3684 ivar &= ~(0xFF << index);
3685 ivar |= (vector << index);
3686 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3121 if (type == -1) { /* MISC IVAR */
3122 index = (entry & 1) * 8;
3123 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3124 ivar &= ~(0xFF << index);
3125 ivar |= (vector << index);
3126 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3687 } else { /* RX/TX IVARS */
3127 } else { /* RX/TX IVARS */
3688 index = (16 * (entry & 1)) + (8 * type);
3689 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3690 ivar &= ~(0xFF << index);
3691 ivar |= (vector << index);
3692 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3693 }
3694
3695 default:
3696 break;
3697 }
3128 index = (16 * (entry & 1)) + (8 * type);
3129 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3130 ivar &= ~(0xFF << index);
3131 ivar |= (vector << index);
3132 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3133 }
3134
3135 default:
3136 break;
3137 }
3698}
3138} /* ixgbe_set_ivar */
3699
3139
3140/************************************************************************
3141 * ixgbe_configure_ivars
3142 ************************************************************************/
3700static void
3701ixgbe_configure_ivars(struct adapter *adapter)
3702{
3143static void
3144ixgbe_configure_ivars(struct adapter *adapter)
3145{
3703 struct ix_queue *que = adapter->queues;
3704 u32 newitr;
3146 struct ix_queue *que = adapter->queues;
3147 u32 newitr;
3705
3706 if (ixgbe_max_interrupt_rate > 0)
3707 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3708 else {
3709 /*
3148
3149 if (ixgbe_max_interrupt_rate > 0)
3150 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3151 else {
3152 /*
3710 ** Disable DMA coalescing if interrupt moderation is
3711 ** disabled.
3712 */
3153 * Disable DMA coalescing if interrupt moderation is
3154 * disabled.
3155 */
3713 adapter->dmac = 0;
3714 newitr = 0;
3715 }
3716
3156 adapter->dmac = 0;
3157 newitr = 0;
3158 }
3159
3717 for (int i = 0; i < adapter->num_queues; i++, que++) {
3160 for (int i = 0; i < adapter->num_queues; i++, que++) {
3718 struct rx_ring *rxr = &adapter->rx_rings[i];
3719 struct tx_ring *txr = &adapter->tx_rings[i];
3720 /* First the RX queue entry */
3161 struct rx_ring *rxr = &adapter->rx_rings[i];
3162 struct tx_ring *txr = &adapter->tx_rings[i];
3163 /* First the RX queue entry */
3721 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3164 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3722 /* ... and the TX */
3723 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3724 /* Set an Initial EITR value */
3165 /* ... and the TX */
3166 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3167 /* Set an Initial EITR value */
3725 IXGBE_WRITE_REG(&adapter->hw,
3726 IXGBE_EITR(que->msix), newitr);
3168 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3727 }
3728
3729 /* For the Link interrupt */
3169 }
3170
3171 /* For the Link interrupt */
3730 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3731}
3172 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3173} /* ixgbe_configure_ivars */
3732
3174
3733/*
3734** ixgbe_sfp_probe - called in the local timer to
3735** determine if a port had optics inserted.
3736*/
3175/************************************************************************
3176 * ixgbe_config_gpie
3177 ************************************************************************/
3178static void
3179ixgbe_config_gpie(struct adapter *adapter)
3180{
3181 struct ixgbe_hw *hw = &adapter->hw;
3182 u32 gpie;
3183
3184 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3185
3186 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3187 /* Enable Enhanced MSI-X mode */
3188 gpie |= IXGBE_GPIE_MSIX_MODE
3189 | IXGBE_GPIE_EIAME
3190 | IXGBE_GPIE_PBA_SUPPORT
3191 | IXGBE_GPIE_OCD;
3192 }
3193
3194 /* Fan Failure Interrupt */
3195 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3196 gpie |= IXGBE_SDP1_GPIEN;
3197
3198 /* Thermal Sensor Interrupt */
3199 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3200 gpie |= IXGBE_SDP0_GPIEN_X540;
3201
3202 /* Link detection */
3203 switch (hw->mac.type) {
3204 case ixgbe_mac_82599EB:
3205 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3206 break;
3207 case ixgbe_mac_X550EM_x:
3208 case ixgbe_mac_X550EM_a:
3209 gpie |= IXGBE_SDP0_GPIEN_X540;
3210 break;
3211 default:
3212 break;
3213 }
3214
3215 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3216
3217 return;
3218} /* ixgbe_config_gpie */
3219
3220/************************************************************************
3221 * ixgbe_config_delay_values
3222 *
3223 * Requires adapter->max_frame_size to be set.
3224 ************************************************************************/
3225static void
3226ixgbe_config_delay_values(struct adapter *adapter)
3227{
3228 struct ixgbe_hw *hw = &adapter->hw;
3229 u32 rxpb, frame, size, tmp;
3230
3231 frame = adapter->max_frame_size;
3232
3233 /* Calculate High Water */
3234 switch (hw->mac.type) {
3235 case ixgbe_mac_X540:
3236 case ixgbe_mac_X550:
3237 case ixgbe_mac_X550EM_x:
3238 case ixgbe_mac_X550EM_a:
3239 tmp = IXGBE_DV_X540(frame, frame);
3240 break;
3241 default:
3242 tmp = IXGBE_DV(frame, frame);
3243 break;
3244 }
3245 size = IXGBE_BT2KB(tmp);
3246 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3247 hw->fc.high_water[0] = rxpb - size;
3248
3249 /* Now calculate Low Water */
3250 switch (hw->mac.type) {
3251 case ixgbe_mac_X540:
3252 case ixgbe_mac_X550:
3253 case ixgbe_mac_X550EM_x:
3254 case ixgbe_mac_X550EM_a:
3255 tmp = IXGBE_LOW_DV_X540(frame);
3256 break;
3257 default:
3258 tmp = IXGBE_LOW_DV(frame);
3259 break;
3260 }
3261 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3262
3263 hw->fc.pause_time = IXGBE_FC_PAUSE;
3264 hw->fc.send_xon = TRUE;
3265} /* ixgbe_config_delay_values */
3266
3267/************************************************************************
3268 * ixgbe_set_multi - Multicast Update
3269 *
3270 * Called whenever multicast address list is updated.
3271 ************************************************************************/
3272static void
3273ixgbe_set_multi(struct adapter *adapter)
3274{
3275 struct ifmultiaddr *ifma;
3276 struct ixgbe_mc_addr *mta;
3277 struct ifnet *ifp = adapter->ifp;
3278 u8 *update_ptr;
3279 int mcnt = 0;
3280 u32 fctrl;
3281
3282 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3283
3284 mta = adapter->mta;
3285 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3286
3287#if __FreeBSD_version < 800000
3288 IF_ADDR_LOCK(ifp);
3289#else
3290 if_maddr_rlock(ifp);
3291#endif
3292 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3293 if (ifma->ifma_addr->sa_family != AF_LINK)
3294 continue;
3295 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3296 break;
3297 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3298 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3299 mta[mcnt].vmdq = adapter->pool;
3300 mcnt++;
3301 }
3302#if __FreeBSD_version < 800000
3303 IF_ADDR_UNLOCK(ifp);
3304#else
3305 if_maddr_runlock(ifp);
3306#endif
3307
3308 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3309 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3310 if (ifp->if_flags & IFF_PROMISC)
3311 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3312 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3313 ifp->if_flags & IFF_ALLMULTI) {
3314 fctrl |= IXGBE_FCTRL_MPE;
3315 fctrl &= ~IXGBE_FCTRL_UPE;
3316 } else
3317 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3318
3319 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3320
3321 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3322 update_ptr = (u8 *)mta;
3323 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3324 ixgbe_mc_array_itr, TRUE);
3325 }
3326
3327 return;
3328} /* ixgbe_set_multi */
3329
3330/************************************************************************
3331 * ixgbe_mc_array_itr
3332 *
3333 * An iterator function needed by the multicast shared code.
3334 * It feeds the shared code routine the addresses in the
3335 * array of ixgbe_set_multi() one by one.
3336 ************************************************************************/
3337static u8 *
3338ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3339{
3340 struct ixgbe_mc_addr *mta;
3341
3342 mta = (struct ixgbe_mc_addr *)*update_ptr;
3343 *vmdq = mta->vmdq;
3344
3345 *update_ptr = (u8*)(mta + 1);
3346
3347 return (mta->addr);
3348} /* ixgbe_mc_array_itr */
3349
3350/************************************************************************
3351 * ixgbe_local_timer - Timer routine
3352 *
3353 * Checks for link status, updates statistics,
3354 * and runs the watchdog check.
3355 ************************************************************************/
3356static void
3357ixgbe_local_timer(void *arg)
3358{
3359 struct adapter *adapter = arg;
3360 device_t dev = adapter->dev;
3361 struct ix_queue *que = adapter->queues;
3362 u64 queues = 0;
3363 int hung = 0;
3364
3365 mtx_assert(&adapter->core_mtx, MA_OWNED);
3366
3367 /* Check for pluggable optics */
3368 if (adapter->sfp_probe)
3369 if (!ixgbe_sfp_probe(adapter))
3370 goto out; /* Nothing to do */
3371
3372 ixgbe_update_link_status(adapter);
3373 ixgbe_update_stats_counters(adapter);
3374
3375 /*
3376 * Check the TX queues status
3377 * - mark hung queues so we don't schedule on them
3378 * - watchdog only if all queues show hung
3379 */
3380 for (int i = 0; i < adapter->num_queues; i++, que++) {
3381 /* Keep track of queues with work for soft irq */
3382 if (que->txr->busy)
3383 queues |= ((u64)1 << que->me);
3384 /*
3385 * Each time txeof runs without cleaning, but there
3386 * are uncleaned descriptors it increments busy. If
3387 * we get to the MAX we declare it hung.
3388 */
3389 if (que->busy == IXGBE_QUEUE_HUNG) {
3390 ++hung;
3391 /* Mark the queue as inactive */
3392 adapter->active_queues &= ~((u64)1 << que->me);
3393 continue;
3394 } else {
3395 /* Check if we've come back from hung */
3396 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
3397 adapter->active_queues |= ((u64)1 << que->me);
3398 }
3399 if (que->busy >= IXGBE_MAX_TX_BUSY) {
3400 device_printf(dev,
3401 "Warning queue %d appears to be hung!\n", i);
3402 que->txr->busy = IXGBE_QUEUE_HUNG;
3403 ++hung;
3404 }
3405 }
3406
3407 /* Only truly watchdog if all queues show hung */
3408 if (hung == adapter->num_queues)
3409 goto watchdog;
3410 else if (queues != 0) { /* Force an IRQ on queues with work */
3411 ixgbe_rearm_queues(adapter, queues);
3412 }
3413
3414out:
3415 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3416 return;
3417
3418watchdog:
3419 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
3420 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3421 adapter->watchdog_events++;
3422 ixgbe_init_locked(adapter);
3423} /* ixgbe_local_timer */
3424
3425/************************************************************************
3426 * ixgbe_sfp_probe
3427 *
3428 * Determine if a port had optics inserted.
3429 ************************************************************************/
3737static bool
3738ixgbe_sfp_probe(struct adapter *adapter)
3739{
3430static bool
3431ixgbe_sfp_probe(struct adapter *adapter)
3432{
3740 struct ixgbe_hw *hw = &adapter->hw;
3741 device_t dev = adapter->dev;
3742 bool result = FALSE;
3433 struct ixgbe_hw *hw = &adapter->hw;
3434 device_t dev = adapter->dev;
3435 bool result = FALSE;
3743
3744 if ((hw->phy.type == ixgbe_phy_nl) &&
3745 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3746 s32 ret = hw->phy.ops.identify_sfp(hw);
3747 if (ret)
3436
3437 if ((hw->phy.type == ixgbe_phy_nl) &&
3438 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3439 s32 ret = hw->phy.ops.identify_sfp(hw);
3440 if (ret)
3748 goto out;
3441 goto out;
3749 ret = hw->phy.ops.reset(hw);
3442 ret = hw->phy.ops.reset(hw);
3443 adapter->sfp_probe = FALSE;
3750 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3751 device_printf(dev, "Unsupported SFP+ module detected!");
3444 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3445 device_printf(dev, "Unsupported SFP+ module detected!");
3752 device_printf(dev, "Reload driver with supported module.\n");
3753 adapter->sfp_probe = FALSE;
3754 goto out;
3446 device_printf(dev,
3447 "Reload driver with supported module.\n");
3448 goto out;
3755 } else
3756 device_printf(dev, "SFP+ module detected!\n");
3757 /* We now have supported optics */
3449 } else
3450 device_printf(dev, "SFP+ module detected!\n");
3451 /* We now have supported optics */
3758 adapter->sfp_probe = FALSE;
3759 /* Set the optics type so system reports correctly */
3760 ixgbe_setup_optics(adapter);
3761 result = TRUE;
3762 }
3763out:
3452 result = TRUE;
3453 }
3454out:
3455
3764 return (result);
3456 return (result);
3765}
3457} /* ixgbe_sfp_probe */
3766
3458
3767/*
3768** Tasklet handler for MSIX Link interrupts
3769** - do outside interrupt since it might sleep
3770*/
3459/************************************************************************
3460 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3461 ************************************************************************/
3771static void
3462static void
3772ixgbe_handle_link(void *context, int pending)
3773{
3774 struct adapter *adapter = context;
3775 struct ixgbe_hw *hw = &adapter->hw;
3776
3777 ixgbe_check_link(hw,
3778 &adapter->link_speed, &adapter->link_up, 0);
3779 ixgbe_update_link_status(adapter);
3780
3781 /* Re-enable link interrupts */
3782 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3783}
3784
3785/*
3786** Tasklet for handling SFP module interrupts
3787*/
3788static void
3789ixgbe_handle_mod(void *context, int pending)
3790{
3791 struct adapter *adapter = context;
3792 struct ixgbe_hw *hw = &adapter->hw;
3463ixgbe_handle_mod(void *context, int pending)
3464{
3465 struct adapter *adapter = context;
3466 struct ixgbe_hw *hw = &adapter->hw;
3793 enum ixgbe_phy_type orig_type = hw->phy.type;
3794 device_t dev = adapter->dev;
3795 u32 err;
3467 device_t dev = adapter->dev;
3468 u32 err, cage_full = 0;
3796
3469
3797 IXGBE_CORE_LOCK(adapter);
3798
3799 /* Check to see if the PHY type changed */
3800 if (hw->phy.ops.identify) {
3801 hw->phy.type = ixgbe_phy_unknown;
3802 hw->phy.ops.identify(hw);
3803 }
3804
3805 if (hw->phy.type != orig_type) {
3806 device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
3807
3808 if (hw->phy.type == ixgbe_phy_none) {
3809 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
3810 goto out;
3470 if (adapter->hw.need_crosstalk_fix) {
3471 switch (hw->mac.type) {
3472 case ixgbe_mac_82599EB:
3473 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3474 IXGBE_ESDP_SDP2;
3475 break;
3476 case ixgbe_mac_X550EM_x:
3477 case ixgbe_mac_X550EM_a:
3478 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3479 IXGBE_ESDP_SDP0;
3480 break;
3481 default:
3482 break;
3811 }
3812
3483 }
3484
3813 /* Try to do the initialization that was skipped before */
3814 if (hw->phy.ops.init)
3815 hw->phy.ops.init(hw);
3816 if (hw->phy.ops.reset)
3817 hw->phy.ops.reset(hw);
3485 if (!cage_full)
3486 return;
3818 }
3819
3820 err = hw->phy.ops.identify_sfp(hw);
3821 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3822 device_printf(dev,
3823 "Unsupported SFP+ module type was detected.\n");
3487 }
3488
3489 err = hw->phy.ops.identify_sfp(hw);
3490 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3491 device_printf(dev,
3492 "Unsupported SFP+ module type was detected.\n");
3824 goto out;
3493 return;
3825 }
3826
3827 err = hw->mac.ops.setup_sfp(hw);
3828 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3829 device_printf(dev,
3830 "Setup failure - unsupported SFP+ module type.\n");
3494 }
3495
3496 err = hw->mac.ops.setup_sfp(hw);
3497 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3498 device_printf(dev,
3499 "Setup failure - unsupported SFP+ module type.\n");
3831 goto out;
3500 return;
3832 }
3501 }
3833 if (hw->phy.multispeed_fiber)
3834 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3835out:
3836 /* Update media type */
3837 switch (hw->mac.ops.get_media_type(hw)) {
3838 case ixgbe_media_type_fiber:
3839 adapter->optics = IFM_10G_SR;
3840 break;
3841 case ixgbe_media_type_copper:
3842 adapter->optics = IFM_10G_TWINAX;
3843 break;
3844 case ixgbe_media_type_cx4:
3845 adapter->optics = IFM_10G_CX4;
3846 break;
3847 default:
3848 adapter->optics = 0;
3849 break;
3850 }
3502 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3503} /* ixgbe_handle_mod */
3851
3504
3852 IXGBE_CORE_UNLOCK(adapter);
3853 return;
3854}
3855
3505
3856
3857/*
3858** Tasklet for handling MSF (multispeed fiber) interrupts
3859*/
3506/************************************************************************
3507 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3508 ************************************************************************/
3860static void
3861ixgbe_handle_msf(void *context, int pending)
3862{
3863 struct adapter *adapter = context;
3864 struct ixgbe_hw *hw = &adapter->hw;
3509static void
3510ixgbe_handle_msf(void *context, int pending)
3511{
3512 struct adapter *adapter = context;
3513 struct ixgbe_hw *hw = &adapter->hw;
3865 u32 autoneg;
3866 bool negotiate;
3514 u32 autoneg;
3515 bool negotiate;
3867
3516
3868 IXGBE_CORE_LOCK(adapter);
3869 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3870 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3871
3872 autoneg = hw->phy.autoneg_advertised;
3873 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3874 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3875 if (hw->mac.ops.setup_link)
3876 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3877
3878 /* Adjust media types shown in ifconfig */
3879 ifmedia_removeall(&adapter->media);
3880 ixgbe_add_media_types(adapter);
3881 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3517 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3518 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3519
3520 autoneg = hw->phy.autoneg_advertised;
3521 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3522 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3523 if (hw->mac.ops.setup_link)
3524 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3525
3526 /* Adjust media types shown in ifconfig */
3527 ifmedia_removeall(&adapter->media);
3528 ixgbe_add_media_types(adapter);
3529 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3882 IXGBE_CORE_UNLOCK(adapter);
3883 return;
3884}
3530} /* ixgbe_handle_msf */
3885
3531
3886/*
3887** Tasklet for handling interrupts from an external PHY
3888*/
3532/************************************************************************
3533 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3534 ************************************************************************/
3889static void
3890ixgbe_handle_phy(void *context, int pending)
3891{
3892 struct adapter *adapter = context;
3893 struct ixgbe_hw *hw = &adapter->hw;
3535static void
3536ixgbe_handle_phy(void *context, int pending)
3537{
3538 struct adapter *adapter = context;
3539 struct ixgbe_hw *hw = &adapter->hw;
3894 int error;
3540 int error;
3895
3896 error = hw->phy.ops.handle_lasi(hw);
3897 if (error == IXGBE_ERR_OVERTEMP)
3541
3542 error = hw->phy.ops.handle_lasi(hw);
3543 if (error == IXGBE_ERR_OVERTEMP)
3898 device_printf(adapter->dev,
3899 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3900 " PHY will downshift to lower power state!\n");
3544 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3901 else if (error)
3902 device_printf(adapter->dev,
3545 else if (error)
3546 device_printf(adapter->dev,
3903 "Error handling LASI interrupt: %d\n",
3904 error);
3905 return;
3906}
3547 "Error handling LASI interrupt: %d\n", error);
3548} /* ixgbe_handle_phy */
3907
3549
3908#ifdef IXGBE_FDIR
3909/*
3910** Tasklet for reinitializing the Flow Director filter table
3911*/
3550/************************************************************************
3551 * ixgbe_stop - Stop the hardware
3552 *
3553 * Disables all traffic on the adapter by issuing a
3554 * global reset on the MAC and deallocates TX/RX buffers.
3555 ************************************************************************/
3912static void
3556static void
3913ixgbe_reinit_fdir(void *context, int pending)
3557ixgbe_stop(void *arg)
3914{
3558{
3915 struct adapter *adapter = context;
3916 struct ifnet *ifp = adapter->ifp;
3559 struct ifnet *ifp;
3560 struct adapter *adapter = arg;
3561 struct ixgbe_hw *hw = &adapter->hw;
3917
3562
3918 if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3919 return;
3920 ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3921 adapter->fdir_reinit = 0;
3922 /* re-enable flow director interrupts */
3923 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3924 /* Restart the interface */
3925 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3563 ifp = adapter->ifp;
3564
3565 mtx_assert(&adapter->core_mtx, MA_OWNED);
3566
3567 INIT_DEBUGOUT("ixgbe_stop: begin\n");
3568 ixgbe_disable_intr(adapter);
3569 callout_stop(&adapter->timer);
3570
3571 /* Let the stack know...*/
3572 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3573
3574 ixgbe_reset_hw(hw);
3575 hw->adapter_stopped = FALSE;
3576 ixgbe_stop_adapter(hw);
3577 if (hw->mac.type == ixgbe_mac_82599EB)
3578 ixgbe_stop_mac_link_on_d3_82599(hw);
3579 /* Turn off the laser - noop with no optics */
3580 ixgbe_disable_tx_laser(hw);
3581
3582 /* Update the stack */
3583 adapter->link_up = FALSE;
3584 ixgbe_update_link_status(adapter);
3585
3586 /* reprogram the RAR[0] in case user changed it. */
3587 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3588
3926 return;
3589 return;
3927}
3928#endif
3590} /* ixgbe_stop */
3929
3591
3930/*********************************************************************
3592/************************************************************************
3593 * ixgbe_update_link_status - Update OS on link state
3931 *
3594 *
3932 * Configure DMA Coalescing
3933 *
3934 **********************************************************************/
3595 * Note: Only updates the OS on the cached link state.
3596 * The real check of the hardware only happens with
3597 * a link interrupt.
3598 ************************************************************************/
3935static void
3599static void
3600ixgbe_update_link_status(struct adapter *adapter)
3601{
3602 struct ifnet *ifp = adapter->ifp;
3603 device_t dev = adapter->dev;
3604
3605 if (adapter->link_up) {
3606 if (adapter->link_active == FALSE) {
3607 if (bootverbose)
3608 device_printf(dev, "Link is up %d Gbps %s \n",
3609 ((adapter->link_speed == 128) ? 10 : 1),
3610 "Full Duplex");
3611 adapter->link_active = TRUE;
3612 /* Update any Flow Control changes */
3613 ixgbe_fc_enable(&adapter->hw);
3614 /* Update DMA coalescing config */
3615 ixgbe_config_dmac(adapter);
3616 if_link_state_change(ifp, LINK_STATE_UP);
3617 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3618 ixgbe_ping_all_vfs(adapter);
3619 }
3620 } else { /* Link down */
3621 if (adapter->link_active == TRUE) {
3622 if (bootverbose)
3623 device_printf(dev, "Link is Down\n");
3624 if_link_state_change(ifp, LINK_STATE_DOWN);
3625 adapter->link_active = FALSE;
3626 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3627 ixgbe_ping_all_vfs(adapter);
3628 }
3629 }
3630
3631 return;
3632} /* ixgbe_update_link_status */
3633
3634/************************************************************************
3635 * ixgbe_config_dmac - Configure DMA Coalescing
3636 ************************************************************************/
3637static void
3936ixgbe_config_dmac(struct adapter *adapter)
3937{
3638ixgbe_config_dmac(struct adapter *adapter)
3639{
3938 struct ixgbe_hw *hw = &adapter->hw;
3640 struct ixgbe_hw *hw = &adapter->hw;
3939 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3940
3641 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3642
3941 if (hw->mac.type < ixgbe_mac_X550 ||
3942 !hw->mac.ops.dmac_config)
3643 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3943 return;
3944
3945 if (dcfg->watchdog_timer ^ adapter->dmac ||
3946 dcfg->link_speed ^ adapter->link_speed) {
3947 dcfg->watchdog_timer = adapter->dmac;
3948 dcfg->fcoe_en = false;
3949 dcfg->link_speed = adapter->link_speed;
3950 dcfg->num_tcs = 1;
3644 return;
3645
3646 if (dcfg->watchdog_timer ^ adapter->dmac ||
3647 dcfg->link_speed ^ adapter->link_speed) {
3648 dcfg->watchdog_timer = adapter->dmac;
3649 dcfg->fcoe_en = false;
3650 dcfg->link_speed = adapter->link_speed;
3651 dcfg->num_tcs = 1;
3951
3652
3952 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3953 dcfg->watchdog_timer, dcfg->link_speed);
3954
3955 hw->mac.ops.dmac_config(hw);
3956 }
3653 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3654 dcfg->watchdog_timer, dcfg->link_speed);
3655
3656 hw->mac.ops.dmac_config(hw);
3657 }
3957}
3658} /* ixgbe_config_dmac */
3958
3659
3959/*
3960 * Checks whether the adapter's ports are capable of
3961 * Wake On LAN by reading the adapter's NVM.
3962 *
3963 * Sets each port's hw->wol_enabled value depending
3964 * on the value read here.
3965 */
3660/************************************************************************
3661 * ixgbe_enable_intr
3662 ************************************************************************/
3966static void
3663static void
3967ixgbe_check_wol_support(struct adapter *adapter)
3664ixgbe_enable_intr(struct adapter *adapter)
3968{
3969 struct ixgbe_hw *hw = &adapter->hw;
3665{
3666 struct ixgbe_hw *hw = &adapter->hw;
3970 u16 dev_caps = 0;
3667 struct ix_queue *que = adapter->queues;
3668 u32 mask, fwsm;
3971
3669
3972 /* Find out WoL support for port */
3973 adapter->wol_support = hw->wol_enabled = 0;
3974 ixgbe_get_device_caps(hw, &dev_caps);
3975 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3976 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3977 hw->bus.func == 0))
3978 adapter->wol_support = hw->wol_enabled = 1;
3670 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3979
3671
3980 /* Save initial wake up filter configuration */
3981 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3982
3983 return;
3984}
3985
3986/*
3987 * Prepare the adapter/port for LPLU and/or WoL
3988 */
3989static int
3990ixgbe_setup_low_power_mode(struct adapter *adapter)
3991{
3992 struct ixgbe_hw *hw = &adapter->hw;
3993 device_t dev = adapter->dev;
3994 s32 error = 0;
3995
3996 mtx_assert(&adapter->core_mtx, MA_OWNED);
3997
3998 if (!hw->wol_enabled)
3999 ixgbe_set_phy_power(hw, FALSE);
4000
4001 /* Limit power management flow to X550EM baseT */
4002 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
4003 && hw->phy.ops.enter_lplu) {
4004 /* Turn off support for APM wakeup. (Using ACPI instead) */
4005 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4006 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4007
4008 /*
4009 * Clear Wake Up Status register to prevent any previous wakeup
4010 * events from waking us up immediately after we suspend.
4011 */
4012 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4013
4014 /*
4015 * Program the Wakeup Filter Control register with user filter
4016 * settings
4017 */
4018 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4019
4020 /* Enable wakeups and power management in Wakeup Control */
4021 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4022 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4023
4024 /* X550EM baseT adapters need a special LPLU flow */
4025 hw->phy.reset_disable = true;
4026 ixgbe_stop(adapter);
4027 error = hw->phy.ops.enter_lplu(hw);
4028 if (error)
4029 device_printf(dev,
4030 "Error entering LPLU: %d\n", error);
4031 hw->phy.reset_disable = false;
4032 } else {
4033 /* Just stop for other adapters */
4034 ixgbe_stop(adapter);
3672 switch (adapter->hw.mac.type) {
3673 case ixgbe_mac_82599EB:
3674 mask |= IXGBE_EIMS_ECC;
3675 /* Temperature sensor on some adapters */
3676 mask |= IXGBE_EIMS_GPI_SDP0;
3677 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3678 mask |= IXGBE_EIMS_GPI_SDP1;
3679 mask |= IXGBE_EIMS_GPI_SDP2;
3680 break;
3681 case ixgbe_mac_X540:
3682 /* Detect if Thermal Sensor is enabled */
3683 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3684 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3685 mask |= IXGBE_EIMS_TS;
3686 mask |= IXGBE_EIMS_ECC;
3687 break;
3688 case ixgbe_mac_X550:
3689 /* MAC thermal sensor is automatically enabled */
3690 mask |= IXGBE_EIMS_TS;
3691 mask |= IXGBE_EIMS_ECC;
3692 break;
3693 case ixgbe_mac_X550EM_x:
3694 case ixgbe_mac_X550EM_a:
3695 /* Some devices use SDP0 for important information */
3696 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3697 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3698 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3699 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3700 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3701 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3702 mask |= IXGBE_EICR_GPI_SDP0_X540;
3703 mask |= IXGBE_EIMS_ECC;
3704 break;
3705 default:
3706 break;
4035 }
4036
3707 }
3708
4037 return error;
4038}
3709 /* Enable Fan Failure detection */
3710 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3711 mask |= IXGBE_EIMS_GPI_SDP1;
3712 /* Enable SR-IOV */
3713 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3714 mask |= IXGBE_EIMS_MAILBOX;
3715 /* Enable Flow Director */
3716 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3717 mask |= IXGBE_EIMS_FLOW_DIR;
4039
3718
4040/**********************************************************************
4041 *
4042 * Update the board statistics counters.
4043 *
4044 **********************************************************************/
4045static void
4046ixgbe_update_stats_counters(struct adapter *adapter)
4047{
4048 struct ixgbe_hw *hw = &adapter->hw;
4049 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4050 u64 total_missed_rx = 0;
3719 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4051
3720
4052 adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4053 adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4054 adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4055 adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4056
4057 for (int i = 0; i < 16; i++) {
4058 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4059 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4060 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3721 /* With MSI-X we use auto clear */
3722 if (adapter->msix_mem) {
3723 mask = IXGBE_EIMS_ENABLE_MASK;
3724 /* Don't autoclear Link */
3725 mask &= ~IXGBE_EIMS_OTHER;
3726 mask &= ~IXGBE_EIMS_LSC;
3727 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3728 mask &= ~IXGBE_EIMS_MAILBOX;
3729 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4061 }
3730 }
4062 adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
4063 adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
4064 adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4065
3731
4066 /* Hardware workaround, gprc counts missed packets */
4067 adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4068 adapter->stats.pf.gprc -= missed_rx;
4069
4070 if (hw->mac.type != ixgbe_mac_82598EB) {
4071 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4072 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4073 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4074 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
4075 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
4076 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4077 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4078 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4079 } else {
4080 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4081 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4082 /* 82598 only has a counter in the high register */
4083 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4084 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4085 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4086 }
4087
4088 /*
3732 /*
4089 * Workaround: mprc hardware is incorrectly counting
4090 * broadcasts, so for now we subtract those.
3733 * Now enable all queues, this is done separately to
3734 * allow for handling the extended (beyond 32) MSI-X
3735 * vectors that can be used by 82599
4091 */
3736 */
4092 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4093 adapter->stats.pf.bprc += bprc;
4094 adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4095 if (hw->mac.type == ixgbe_mac_82598EB)
4096 adapter->stats.pf.mprc -= bprc;
3737 for (int i = 0; i < adapter->num_queues; i++, que++)
3738 ixgbe_enable_queue(adapter, que->msix);
4097
3739
4098 adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4099 adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4100 adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4101 adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4102 adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4103 adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3740 IXGBE_WRITE_FLUSH(hw);
4104
3741
4105 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4106 adapter->stats.pf.lxontxc += lxon;
4107 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4108 adapter->stats.pf.lxofftxc += lxoff;
4109 total = lxon + lxoff;
3742 return;
3743} /* ixgbe_enable_intr */
4110
3744
4111 adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4112 adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4113 adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4114 adapter->stats.pf.gptc -= total;
4115 adapter->stats.pf.mptc -= total;
4116 adapter->stats.pf.ptc64 -= total;
4117 adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
4118
4119 adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4120 adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4121 adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4122 adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4123 adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4124 adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4125 adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4126 adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4127 adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
4128 adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4129 adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4130 adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4131 adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4132 adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4133 adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4134 adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
4135 adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4136 adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4137 /* Only read FCOE on 82599 */
4138 if (hw->mac.type != ixgbe_mac_82598EB) {
4139 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4140 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4141 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4142 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4143 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4144 }
4145
4146 /* Fill out the OS statistics structure */
4147 IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
4148 IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
4149 IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
4150 IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
4151 IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
4152 IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
4153 IXGBE_SET_COLLISIONS(adapter, 0);
4154 IXGBE_SET_IQDROPS(adapter, total_missed_rx);
4155 IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
4156 + adapter->stats.pf.rlec);
4157}
4158
4159#if __FreeBSD_version >= 1100036
4160static uint64_t
4161ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3745/************************************************************************
3746 * ixgbe_disable_intr
3747 ************************************************************************/
3748static void
3749ixgbe_disable_intr(struct adapter *adapter)
4162{
3750{
4163 struct adapter *adapter;
4164 struct tx_ring *txr;
4165 uint64_t rv;
4166
4167 adapter = if_getsoftc(ifp);
4168
4169 switch (cnt) {
4170 case IFCOUNTER_IPACKETS:
4171 return (adapter->ipackets);
4172 case IFCOUNTER_OPACKETS:
4173 return (adapter->opackets);
4174 case IFCOUNTER_IBYTES:
4175 return (adapter->ibytes);
4176 case IFCOUNTER_OBYTES:
4177 return (adapter->obytes);
4178 case IFCOUNTER_IMCASTS:
4179 return (adapter->imcasts);
4180 case IFCOUNTER_OMCASTS:
4181 return (adapter->omcasts);
4182 case IFCOUNTER_COLLISIONS:
4183 return (0);
4184 case IFCOUNTER_IQDROPS:
4185 return (adapter->iqdrops);
4186 case IFCOUNTER_OQDROPS:
4187 rv = 0;
4188 txr = adapter->tx_rings;
4189 for (int i = 0; i < adapter->num_queues; i++, txr++)
4190 rv += txr->br->br_drops;
4191 return (rv);
4192 case IFCOUNTER_IERRORS:
4193 return (adapter->ierrors);
4194 default:
4195 return (if_get_counter_default(ifp, cnt));
3751 if (adapter->msix_mem)
3752 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3753 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3754 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3755 } else {
3756 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3757 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3758 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4196 }
3759 }
4197}
4198#endif
3760 IXGBE_WRITE_FLUSH(&adapter->hw);
4199
3761
4200/** ixgbe_sysctl_tdh_handler - Handler function
4201 * Retrieves the TDH value from the hardware
4202 */
4203static int
4204ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4205{
4206 int error;
3762 return;
3763} /* ixgbe_disable_intr */
4207
3764
4208 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4209 if (!txr) return 0;
4210
4211 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4212 error = sysctl_handle_int(oidp, &val, 0, req);
4213 if (error || !req->newptr)
4214 return error;
4215 return 0;
4216}
4217
4218/** ixgbe_sysctl_tdt_handler - Handler function
4219 * Retrieves the TDT value from the hardware
4220 */
4221static int
4222ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4223{
4224 int error;
4225
4226 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4227 if (!txr) return 0;
4228
4229 unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4230 error = sysctl_handle_int(oidp, &val, 0, req);
4231 if (error || !req->newptr)
4232 return error;
4233 return 0;
4234}
4235
4236/** ixgbe_sysctl_rdh_handler - Handler function
4237 * Retrieves the RDH value from the hardware
4238 */
4239static int
4240ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4241{
4242 int error;
4243
4244 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4245 if (!rxr) return 0;
4246
4247 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4248 error = sysctl_handle_int(oidp, &val, 0, req);
4249 if (error || !req->newptr)
4250 return error;
4251 return 0;
4252}
4253
4254/** ixgbe_sysctl_rdt_handler - Handler function
4255 * Retrieves the RDT value from the hardware
4256 */
4257static int
4258ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4259{
4260 int error;
4261
4262 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4263 if (!rxr) return 0;
4264
4265 unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4266 error = sysctl_handle_int(oidp, &val, 0, req);
4267 if (error || !req->newptr)
4268 return error;
4269 return 0;
4270}
4271
4272static int
4273ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4274{
4275 int error;
4276 struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4277 unsigned int reg, usec, rate;
4278
4279 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4280 usec = ((reg & 0x0FF8) >> 3);
4281 if (usec > 0)
4282 rate = 500000 / usec;
4283 else
4284 rate = 0;
4285 error = sysctl_handle_int(oidp, &rate, 0, req);
4286 if (error || !req->newptr)
4287 return error;
4288 reg &= ~0xfff; /* default, no limitation */
4289 ixgbe_max_interrupt_rate = 0;
4290 if (rate > 0 && rate < 500000) {
4291 if (rate < 1000)
4292 rate = 1000;
4293 ixgbe_max_interrupt_rate = rate;
4294 reg |= ((4000000/rate) & 0xff8 );
4295 }
4296 IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
4297 return 0;
4298}
4299
3765/************************************************************************
3766 * ixgbe_legacy_irq - Legacy Interrupt Service routine
3767 ************************************************************************/
4300static void
3768static void
4301ixgbe_add_device_sysctls(struct adapter *adapter)
3769ixgbe_legacy_irq(void *arg)
4302{
3770{
4303 device_t dev = adapter->dev;
3771 struct ix_queue *que = arg;
3772 struct adapter *adapter = que->adapter;
4304 struct ixgbe_hw *hw = &adapter->hw;
3773 struct ixgbe_hw *hw = &adapter->hw;
4305 struct sysctl_oid_list *child;
4306 struct sysctl_ctx_list *ctx;
3774 struct ifnet *ifp = adapter->ifp;
3775 struct tx_ring *txr = adapter->tx_rings;
3776 bool more = false;
3777 u32 eicr, eicr_mask;
4307
3778
4308 ctx = device_get_sysctl_ctx(dev);
4309 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3779 /* Silicon errata #26 on 82598 */
3780 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
4310
3781
4311 /* Sysctls for all devices */
4312 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
4313 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4314 ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3782 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4315
3783
4316 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
4317 CTLFLAG_RW,
4318 &ixgbe_enable_aim, 1, "Interrupt Moderation");
3784 ++que->irqs;
3785 if (eicr == 0) {
3786 ixgbe_enable_intr(adapter);
3787 return;
3788 }
4319
3789
4320 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
4321 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4322 ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3790 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3791 more = ixgbe_rxeof(que);
4323
3792
4324 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
4325 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4326 ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3793 IXGBE_TX_LOCK(txr);
3794 ixgbe_txeof(txr);
3795 if (!ixgbe_ring_empty(ifp, txr->br))
3796 ixgbe_start_locked(ifp, txr);
3797 IXGBE_TX_UNLOCK(txr);
3798 }
4327
3799
4328#ifdef IXGBE_DEBUG
4329 /* testing sysctls (for all devices) */
4330 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
4331 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4332 ixgbe_sysctl_power_state, "I", "PCI Power State");
3800 /* Check for fan failure */
3801 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3802 ixgbe_check_fan_failure(adapter, eicr, true);
3803 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3804 }
4333
3805
4334 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
4335 CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
4336 ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
4337#endif
4338 /* for X550 series devices */
4339 if (hw->mac.type >= ixgbe_mac_X550)
4340 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
4341 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4342 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3806 /* Link status change */
3807 if (eicr & IXGBE_EICR_LSC)
3808 taskqueue_enqueue(adapter->tq, &adapter->link_task);
4343
3809
4344 /* for X552 backplane devices */
4345 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
4346 struct sysctl_oid *eee_node;
4347 struct sysctl_oid_list *eee_list;
3810 if (ixgbe_is_sfp(hw)) {
3811 /* Pluggable optics-related interrupt */
3812 if (hw->mac.type >= ixgbe_mac_X540)
3813 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3814 else
3815 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4348
3816
4349 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
4350 CTLFLAG_RD, NULL,
4351 "Energy Efficient Ethernet sysctls");
4352 eee_list = SYSCTL_CHILDREN(eee_node);
3817 if (eicr & eicr_mask) {
3818 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3819 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3820 }
4353
3821
4354 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
4355 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4356 ixgbe_sysctl_eee_enable, "I",
4357 "Enable or Disable EEE");
4358
4359 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
4360 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4361 ixgbe_sysctl_eee_negotiated, "I",
4362 "EEE negotiated on link");
4363
4364 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4365 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4366 ixgbe_sysctl_eee_tx_lpi_status, "I",
4367 "Whether or not TX link is in LPI state");
4368
4369 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4370 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4371 ixgbe_sysctl_eee_rx_lpi_status, "I",
4372 "Whether or not RX link is in LPI state");
4373
4374 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
4375 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4376 ixgbe_sysctl_eee_tx_lpi_delay, "I",
4377 "TX LPI entry delay in microseconds");
3822 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3823 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3824 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3825 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3826 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3827 }
4378 }
4379
3828 }
3829
4380 /* for WoL-capable devices */
4381 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4382 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
4383 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4384 ixgbe_sysctl_wol_enable, "I",
4385 "Enable/Disable Wake on LAN");
3830 /* External PHY interrupt */
3831 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3832 (eicr & IXGBE_EICR_GPI_SDP0_X540))
3833 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
4386
3834
4387 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
4388 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
4389 ixgbe_sysctl_wufc, "I",
4390 "Enable/Disable Wake Up Filters");
4391 }
3835 if (more)
3836 taskqueue_enqueue(que->tq, &que->que_task);
3837 else
3838 ixgbe_enable_intr(adapter);
4392
3839
4393 /* for X552/X557-AT devices */
4394 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
4395 struct sysctl_oid *phy_node;
4396 struct sysctl_oid_list *phy_list;
3840 return;
3841} /* ixgbe_legacy_irq */
4397
3842
4398 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
4399 CTLFLAG_RD, NULL,
4400 "External PHY sysctls");
4401 phy_list = SYSCTL_CHILDREN(phy_node);
4402
4403 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
4404 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4405 ixgbe_sysctl_phy_temp, "I",
4406 "Current External PHY Temperature (Celsius)");
4407
4408 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
4409 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
4410 ixgbe_sysctl_phy_overtemp_occurred, "I",
4411 "External PHY High Temperature Event Occurred");
4412 }
4413}
4414
4415/*
4416 * Add sysctl variables, one per statistic, to the system.
4417 */
3843/************************************************************************
3844 * ixgbe_free_pci_resources
3845 ************************************************************************/
4418static void
3846static void
4419ixgbe_add_hw_stats(struct adapter *adapter)
3847ixgbe_free_pci_resources(struct adapter *adapter)
4420{
3848{
4421 device_t dev = adapter->dev;
3849 struct ix_queue *que = adapter->queues;
3850 device_t dev = adapter->dev;
3851 int rid, memrid;
4422
3852
4423 struct tx_ring *txr = adapter->tx_rings;
4424 struct rx_ring *rxr = adapter->rx_rings;
3853 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3854 memrid = PCIR_BAR(MSIX_82598_BAR);
3855 else
3856 memrid = PCIR_BAR(MSIX_82599_BAR);
4425
3857
4426 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4427 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4428 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4429 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3858 /*
3859 * There is a slight possibility of a failure mode
3860 * in attach that will result in entering this function
3861 * before interrupt resources have been initialized, and
3862 * in that case we do not want to execute the loops below
3863 * We can detect this reliably by the state of the adapter
3864 * res pointer.
3865 */
3866 if (adapter->res == NULL)
3867 goto mem;
4430
3868
4431 struct sysctl_oid *stat_node, *queue_node;
4432 struct sysctl_oid_list *stat_list, *queue_list;
4433
4434#define QUEUE_NAME_LEN 32
4435 char namebuf[QUEUE_NAME_LEN];
4436
4437 /* Driver Statistics */
4438 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4439 CTLFLAG_RD, &adapter->dropped_pkts,
4440 "Driver dropped packets");
4441 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4442 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4443 "m_defrag() failed");
4444 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4445 CTLFLAG_RD, &adapter->watchdog_events,
4446 "Watchdog timeouts");
4447 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4448 CTLFLAG_RD, &adapter->link_irq,
4449 "Link MSIX IRQ Handled");
4450
4451 for (int i = 0; i < adapter->num_queues; i++, txr++) {
4452 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4453 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4454 CTLFLAG_RD, NULL, "Queue Name");
4455 queue_list = SYSCTL_CHILDREN(queue_node);
4456
4457 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4458 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4459 sizeof(&adapter->queues[i]),
4460 ixgbe_sysctl_interrupt_rate_handler, "IU",
4461 "Interrupt Rate");
4462 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4463 CTLFLAG_RD, &(adapter->queues[i].irqs),
4464 "irqs on this queue");
4465 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4466 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4467 ixgbe_sysctl_tdh_handler, "IU",
4468 "Transmit Descriptor Head");
4469 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4470 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4471 ixgbe_sysctl_tdt_handler, "IU",
4472 "Transmit Descriptor Tail");
4473 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4474 CTLFLAG_RD, &txr->tso_tx,
4475 "TSO");
4476 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4477 CTLFLAG_RD, &txr->no_tx_dma_setup,
4478 "Driver tx dma failure in xmit");
4479 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4480 CTLFLAG_RD, &txr->no_desc_avail,
4481 "Queue No Descriptor Available");
4482 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4483 CTLFLAG_RD, &txr->total_packets,
4484 "Queue Packets Transmitted");
4485 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4486 CTLFLAG_RD, &txr->br->br_drops,
4487 "Packets dropped in buf_ring");
3869 /*
3870 * Release all msix queue resources:
3871 */
3872 for (int i = 0; i < adapter->num_queues; i++, que++) {
3873 rid = que->msix + 1;
3874 if (que->tag != NULL) {
3875 bus_teardown_intr(dev, que->res, que->tag);
3876 que->tag = NULL;
3877 }
3878 if (que->res != NULL)
3879 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
4488 }
4489
3880 }
3881
4490 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4491 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4492 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4493 CTLFLAG_RD, NULL, "Queue Name");
4494 queue_list = SYSCTL_CHILDREN(queue_node);
4495
3882
4496 struct lro_ctrl *lro = &rxr->lro;
4497
4498 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4499 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4500 CTLFLAG_RD, NULL, "Queue Name");
4501 queue_list = SYSCTL_CHILDREN(queue_node);
4502
4503 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4504 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4505 ixgbe_sysctl_rdh_handler, "IU",
4506 "Receive Descriptor Head");
4507 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4508 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4509 ixgbe_sysctl_rdt_handler, "IU",
4510 "Receive Descriptor Tail");
4511 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4512 CTLFLAG_RD, &rxr->rx_packets,
4513 "Queue Packets Received");
4514 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4515 CTLFLAG_RD, &rxr->rx_bytes,
4516 "Queue Bytes Received");
4517 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4518 CTLFLAG_RD, &rxr->rx_copies,
4519 "Copied RX Frames");
4520 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
4521 CTLFLAG_RD, &lro->lro_queued, 0,
4522 "LRO Queued");
4523 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
4524 CTLFLAG_RD, &lro->lro_flushed, 0,
4525 "LRO Flushed");
3883 if (adapter->tag != NULL) {
3884 bus_teardown_intr(dev, adapter->res, adapter->tag);
3885 adapter->tag = NULL;
4526 }
4527
3886 }
3887
4528 /* MAC stats get the own sub node */
3888 /* Clean the Legacy or Link interrupt last */
3889 if (adapter->res != NULL)
3890 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3891 adapter->res);
4529
3892
4530 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4531 CTLFLAG_RD, NULL, "MAC Statistics");
4532 stat_list = SYSCTL_CHILDREN(stat_node);
3893mem:
3894 if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3895 (adapter->feat_en & IXGBE_FEATURE_MSIX))
3896 pci_release_msi(dev);
4533
3897
4534 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4535 CTLFLAG_RD, &stats->crcerrs,
4536 "CRC Errors");
4537 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4538 CTLFLAG_RD, &stats->illerrc,
4539 "Illegal Byte Errors");
4540 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4541 CTLFLAG_RD, &stats->errbc,
4542 "Byte Errors");
4543 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4544 CTLFLAG_RD, &stats->mspdc,
4545 "MAC Short Packets Discarded");
4546 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4547 CTLFLAG_RD, &stats->mlfc,
4548 "MAC Local Faults");
4549 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4550 CTLFLAG_RD, &stats->mrfc,
4551 "MAC Remote Faults");
4552 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4553 CTLFLAG_RD, &stats->rlec,
4554 "Receive Length Errors");
3898 if (adapter->msix_mem != NULL)
3899 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3900 adapter->msix_mem);
4555
3901
4556 /* Flow Control stats */
4557 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4558 CTLFLAG_RD, &stats->lxontxc,
4559 "Link XON Transmitted");
4560 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4561 CTLFLAG_RD, &stats->lxonrxc,
4562 "Link XON Received");
4563 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4564 CTLFLAG_RD, &stats->lxofftxc,
4565 "Link XOFF Transmitted");
4566 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4567 CTLFLAG_RD, &stats->lxoffrxc,
4568 "Link XOFF Received");
3902 if (adapter->pci_mem != NULL)
3903 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3904 adapter->pci_mem);
4569
3905
4570 /* Packet Reception Stats */
4571 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4572 CTLFLAG_RD, &stats->tor,
4573 "Total Octets Received");
4574 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4575 CTLFLAG_RD, &stats->gorc,
4576 "Good Octets Received");
4577 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4578 CTLFLAG_RD, &stats->tpr,
4579 "Total Packets Received");
4580 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4581 CTLFLAG_RD, &stats->gprc,
4582 "Good Packets Received");
4583 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4584 CTLFLAG_RD, &stats->mprc,
4585 "Multicast Packets Received");
4586 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4587 CTLFLAG_RD, &stats->bprc,
4588 "Broadcast Packets Received");
4589 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4590 CTLFLAG_RD, &stats->prc64,
4591 "64 byte frames received ");
4592 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4593 CTLFLAG_RD, &stats->prc127,
4594 "65-127 byte frames received");
4595 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4596 CTLFLAG_RD, &stats->prc255,
4597 "128-255 byte frames received");
4598 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4599 CTLFLAG_RD, &stats->prc511,
4600 "256-511 byte frames received");
4601 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4602 CTLFLAG_RD, &stats->prc1023,
4603 "512-1023 byte frames received");
4604 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4605 CTLFLAG_RD, &stats->prc1522,
4606 "1023-1522 byte frames received");
4607 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4608 CTLFLAG_RD, &stats->ruc,
4609 "Receive Undersized");
4610 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4611 CTLFLAG_RD, &stats->rfc,
4612 "Fragmented Packets Received ");
4613 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4614 CTLFLAG_RD, &stats->roc,
4615 "Oversized Packets Received");
4616 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4617 CTLFLAG_RD, &stats->rjc,
4618 "Received Jabber");
4619 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4620 CTLFLAG_RD, &stats->mngprc,
4621 "Management Packets Received");
4622 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4623 CTLFLAG_RD, &stats->mngptc,
4624 "Management Packets Dropped");
4625 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4626 CTLFLAG_RD, &stats->xec,
4627 "Checksum Errors");
3906 return;
3907} /* ixgbe_free_pci_resources */
4628
3908
4629 /* Packet Transmission Stats */
4630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4631 CTLFLAG_RD, &stats->gotc,
4632 "Good Octets Transmitted");
4633 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4634 CTLFLAG_RD, &stats->tpt,
4635 "Total Packets Transmitted");
4636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4637 CTLFLAG_RD, &stats->gptc,
4638 "Good Packets Transmitted");
4639 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4640 CTLFLAG_RD, &stats->bptc,
4641 "Broadcast Packets Transmitted");
4642 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4643 CTLFLAG_RD, &stats->mptc,
4644 "Multicast Packets Transmitted");
4645 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4646 CTLFLAG_RD, &stats->mngptc,
4647 "Management Packets Transmitted");
4648 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4649 CTLFLAG_RD, &stats->ptc64,
4650 "64 byte frames transmitted ");
4651 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4652 CTLFLAG_RD, &stats->ptc127,
4653 "65-127 byte frames transmitted");
4654 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4655 CTLFLAG_RD, &stats->ptc255,
4656 "128-255 byte frames transmitted");
4657 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4658 CTLFLAG_RD, &stats->ptc511,
4659 "256-511 byte frames transmitted");
4660 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4661 CTLFLAG_RD, &stats->ptc1023,
4662 "512-1023 byte frames transmitted");
4663 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4664 CTLFLAG_RD, &stats->ptc1522,
4665 "1024-1522 byte frames transmitted");
4666}
4667
3909/************************************************************************
3910 * ixgbe_set_sysctl_value
3911 ************************************************************************/
4668static void
4669ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4670 const char *description, int *limit, int value)
4671{
4672 *limit = value;
4673 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4674 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4675 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3912static void
3913ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3914 const char *description, int *limit, int value)
3915{
3916 *limit = value;
3917 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3918 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3919 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4676}
3920} /* ixgbe_set_sysctl_value */
4677
3921
4678/*
4679** Set flow control using sysctl:
4680** Flow control values:
4681** 0 - off
4682** 1 - rx pause
4683** 2 - tx pause
4684** 3 - full
4685*/
3922/************************************************************************
3923 * ixgbe_sysctl_flowcntl
3924 *
3925 * SYSCTL wrapper around setting Flow Control
3926 ************************************************************************/
4686static int
4687ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4688{
3927static int
3928ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3929{
4689 int error, fc;
4690 struct adapter *adapter;
3930 struct adapter *adapter;
3931 int error, fc;
4691
3932
4692 adapter = (struct adapter *) arg1;
4693 fc = adapter->fc;
3933 adapter = (struct adapter *)arg1;
3934 fc = adapter->hw.fc.current_mode;
4694
4695 error = sysctl_handle_int(oidp, &fc, 0, req);
4696 if ((error) || (req->newptr == NULL))
4697 return (error);
4698
4699 /* Don't bother if it's not changed */
3935
3936 error = sysctl_handle_int(oidp, &fc, 0, req);
3937 if ((error) || (req->newptr == NULL))
3938 return (error);
3939
3940 /* Don't bother if it's not changed */
4700 if (adapter->fc == fc)
3941 if (fc == adapter->hw.fc.current_mode)
4701 return (0);
4702
4703 return ixgbe_set_flowcntl(adapter, fc);
3942 return (0);
3943
3944 return ixgbe_set_flowcntl(adapter, fc);
4704}
3945} /* ixgbe_sysctl_flowcntl */
4705
3946
4706
3947/************************************************************************
3948 * ixgbe_set_flowcntl - Set flow control
3949 *
3950 * Flow control values:
3951 * 0 - off
3952 * 1 - rx pause
3953 * 2 - tx pause
3954 * 3 - full
3955 ************************************************************************/
4707static int
4708ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4709{
3956static int
3957ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3958{
4710
4711 switch (fc) {
4712 case ixgbe_fc_rx_pause:
4713 case ixgbe_fc_tx_pause:
4714 case ixgbe_fc_full:
3959 switch (fc) {
3960 case ixgbe_fc_rx_pause:
3961 case ixgbe_fc_tx_pause:
3962 case ixgbe_fc_full:
4715 adapter->hw.fc.requested_mode = adapter->fc;
3963 adapter->hw.fc.requested_mode = fc;
4716 if (adapter->num_queues > 1)
4717 ixgbe_disable_rx_drop(adapter);
4718 break;
4719 case ixgbe_fc_none:
4720 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4721 if (adapter->num_queues > 1)
4722 ixgbe_enable_rx_drop(adapter);
4723 break;
4724 default:
4725 return (EINVAL);
4726 }
3964 if (adapter->num_queues > 1)
3965 ixgbe_disable_rx_drop(adapter);
3966 break;
3967 case ixgbe_fc_none:
3968 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3969 if (adapter->num_queues > 1)
3970 ixgbe_enable_rx_drop(adapter);
3971 break;
3972 default:
3973 return (EINVAL);
3974 }
4727 adapter->fc = fc;
3975
4728 /* Don't autoneg if forcing a value */
4729 adapter->hw.fc.disable_fc_autoneg = TRUE;
4730 ixgbe_fc_enable(&adapter->hw);
3976 /* Don't autoneg if forcing a value */
3977 adapter->hw.fc.disable_fc_autoneg = TRUE;
3978 ixgbe_fc_enable(&adapter->hw);
3979
4731 return (0);
3980 return (0);
4732}
3981} /* ixgbe_set_flowcntl */
4733
3982
4734/*
4735** Control advertised link speed:
4736** Flags:
4737** 0x1 - advertise 100 Mb
4738** 0x2 - advertise 1G
4739** 0x4 - advertise 10G
4740*/
3983/************************************************************************
3984 * ixgbe_enable_rx_drop
3985 *
3986 * Enable the hardware to drop packets when the buffer is
3987 * full. This is useful with multiqueue, so that no single
3988 * queue being full stalls the entire RX engine. We only
3989 * enable this when Multiqueue is enabled AND Flow Control
3990 * is disabled.
3991 ************************************************************************/
3992static void
3993ixgbe_enable_rx_drop(struct adapter *adapter)
3994{
3995 struct ixgbe_hw *hw = &adapter->hw;
3996 struct rx_ring *rxr;
3997 u32 srrctl;
3998
3999 for (int i = 0; i < adapter->num_queues; i++) {
4000 rxr = &adapter->rx_rings[i];
4001 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4002 srrctl |= IXGBE_SRRCTL_DROP_EN;
4003 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4004 }
4005
4006 /* enable drop for each vf */
4007 for (int i = 0; i < adapter->num_vfs; i++) {
4008 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4009 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4010 IXGBE_QDE_ENABLE));
4011 }
4012} /* ixgbe_enable_rx_drop */
4013
4014/************************************************************************
4015 * ixgbe_disable_rx_drop
4016 ************************************************************************/
4017static void
4018ixgbe_disable_rx_drop(struct adapter *adapter)
4019{
4020 struct ixgbe_hw *hw = &adapter->hw;
4021 struct rx_ring *rxr;
4022 u32 srrctl;
4023
4024 for (int i = 0; i < adapter->num_queues; i++) {
4025 rxr = &adapter->rx_rings[i];
4026 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4027 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4028 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4029 }
4030
4031 /* disable drop for each vf */
4032 for (int i = 0; i < adapter->num_vfs; i++) {
4033 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4034 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4035 }
4036} /* ixgbe_disable_rx_drop */
4037
4038/************************************************************************
4039 * ixgbe_sysctl_advertise
4040 *
4041 * SYSCTL wrapper around setting advertised speed
4042 ************************************************************************/
4741static int
4742ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4743{
4043static int
4044ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4045{
4744 int error, advertise;
4745 struct adapter *adapter;
4046 struct adapter *adapter;
4047 int error, advertise;
4746
4048
4747 adapter = (struct adapter *) arg1;
4049 adapter = (struct adapter *)arg1;
4748 advertise = adapter->advertise;
4749
4750 error = sysctl_handle_int(oidp, &advertise, 0, req);
4751 if ((error) || (req->newptr == NULL))
4752 return (error);
4753
4754 return ixgbe_set_advertise(adapter, advertise);
4050 advertise = adapter->advertise;
4051
4052 error = sysctl_handle_int(oidp, &advertise, 0, req);
4053 if ((error) || (req->newptr == NULL))
4054 return (error);
4055
4056 return ixgbe_set_advertise(adapter, advertise);
4755}
4057} /* ixgbe_sysctl_advertise */
4756
4058
4059/************************************************************************
4060 * ixgbe_set_advertise - Control advertised link speed
4061 *
4062 * Flags:
4063 * 0x1 - advertise 100 Mb
4064 * 0x2 - advertise 1G
4065 * 0x4 - advertise 10G
4066 * 0x8 - advertise 10 Mb (yes, Mb)
4067 ************************************************************************/
4757static int
4758ixgbe_set_advertise(struct adapter *adapter, int advertise)
4759{
4068static int
4069ixgbe_set_advertise(struct adapter *adapter, int advertise)
4070{
4760 device_t dev;
4761 struct ixgbe_hw *hw;
4762 ixgbe_link_speed speed;
4071 device_t dev;
4072 struct ixgbe_hw *hw;
4073 ixgbe_link_speed speed = 0;
4074 ixgbe_link_speed link_caps = 0;
4075 s32 err = IXGBE_NOT_IMPLEMENTED;
4076 bool negotiate = FALSE;
4763
4764 /* Checks to validate new value */
4765 if (adapter->advertise == advertise) /* no change */
4766 return (0);
4767
4077
4078 /* Checks to validate new value */
4079 if (adapter->advertise == advertise) /* no change */
4080 return (0);
4081
4768 hw = &adapter->hw;
4769 dev = adapter->dev;
4082 dev = adapter->dev;
4083 hw = &adapter->hw;
4770
4771 /* No speed changes for backplane media */
4772 if (hw->phy.media_type == ixgbe_media_type_backplane)
4773 return (ENODEV);
4774
4775 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4084
4085 /* No speed changes for backplane media */
4086 if (hw->phy.media_type == ixgbe_media_type_backplane)
4087 return (ENODEV);
4088
4089 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4776 (hw->phy.multispeed_fiber))) {
4777 device_printf(dev,
4778 "Advertised speed can only be set on copper or "
4779 "multispeed fiber media types.\n");
4090 (hw->phy.multispeed_fiber))) {
4091 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4780 return (EINVAL);
4781 }
4782
4092 return (EINVAL);
4093 }
4094
4783 if (advertise < 0x1 || advertise > 0x7) {
4784 device_printf(dev,
4785 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4095 if (advertise < 0x1 || advertise > 0xF) {
4096 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4786 return (EINVAL);
4787 }
4788
4097 return (EINVAL);
4098 }
4099
4789 if ((advertise & 0x1)
4790 && (hw->mac.type != ixgbe_mac_X540)
4791 && (hw->mac.type != ixgbe_mac_X550)) {
4792 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4793 return (EINVAL);
4100 if (hw->mac.ops.get_link_capabilities) {
4101 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4102 &negotiate);
4103 if (err != IXGBE_SUCCESS) {
4104 device_printf(dev, "Unable to determine supported advertise speeds\n");
4105 return (ENODEV);
4106 }
4794 }
4795
4796 /* Set new value and report new advertised mode */
4107 }
4108
4109 /* Set new value and report new advertised mode */
4797 speed = 0;
4798 if (advertise & 0x1)
4110 if (advertise & 0x1) {
4111 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4112 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4113 return (EINVAL);
4114 }
4799 speed |= IXGBE_LINK_SPEED_100_FULL;
4115 speed |= IXGBE_LINK_SPEED_100_FULL;
4800 if (advertise & 0x2)
4116 }
4117 if (advertise & 0x2) {
4118 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4119 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4120 return (EINVAL);
4121 }
4801 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4122 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4802 if (advertise & 0x4)
4123 }
4124 if (advertise & 0x4) {
4125 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4126 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4127 return (EINVAL);
4128 }
4803 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4129 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4804 adapter->advertise = advertise;
4130 }
4131 if (advertise & 0x8) {
4132 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4133 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4134 return (EINVAL);
4135 }
4136 speed |= IXGBE_LINK_SPEED_10_FULL;
4137 }
4805
4806 hw->mac.autotry_restart = TRUE;
4807 hw->mac.ops.setup_link(hw, speed, TRUE);
4138
4139 hw->mac.autotry_restart = TRUE;
4140 hw->mac.ops.setup_link(hw, speed, TRUE);
4141 adapter->advertise = advertise;
4808
4809 return (0);
4142
4143 return (0);
4810}
4144} /* ixgbe_set_advertise */
4811
4145
4812/*
4813 * The following two sysctls are for X552/X557-AT devices;
4814 * they deal with the external PHY used in them.
4815 */
4146/************************************************************************
4147 * ixgbe_get_advertise - Get current advertised speed settings
4148 *
4149 * Formatted for sysctl usage.
4150 * Flags:
4151 * 0x1 - advertise 100 Mb
4152 * 0x2 - advertise 1G
4153 * 0x4 - advertise 10G
4154 * 0x8 - advertise 10 Mb (yes, Mb)
4155 ************************************************************************/
4816static int
4156static int
4817ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4157ixgbe_get_advertise(struct adapter *adapter)
4818{
4158{
4819 struct adapter *adapter = (struct adapter *) arg1;
4820 struct ixgbe_hw *hw = &adapter->hw;
4821 u16 reg;
4159 struct ixgbe_hw *hw = &adapter->hw;
4160 int speed;
4161 ixgbe_link_speed link_caps = 0;
4162 s32 err;
4163 bool negotiate = FALSE;
4822
4164
4823 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4824 device_printf(adapter->dev,
4825 "Device has no supported external thermal sensor.\n");
4826 return (ENODEV);
4827 }
4165 /*
4166 * Advertised speed means nothing unless it's copper or
4167 * multi-speed fiber
4168 */
4169 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4170 !(hw->phy.multispeed_fiber))
4171 return (0);
4828
4172
4829 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4830 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4831 &reg)) {
4832 device_printf(adapter->dev,
4833 "Error reading from PHY's current temperature register\n");
4834 return (EAGAIN);
4835 }
4173 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4174 if (err != IXGBE_SUCCESS)
4175 return (0);
4836
4176
4837 /* Shift temp for output */
4838 reg = reg >> 8;
4177 speed =
4178 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4179 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
4180 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
4181 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4839
4182
4840 return (sysctl_handle_int(oidp, NULL, reg, req));
4841}
4183 return speed;
4184} /* ixgbe_get_advertise */
4842
4185
4843/*
4844 * Reports whether the current PHY temperature is over
4845 * the overtemp threshold.
4846 * - This is reported directly from the PHY
4847 */
4186/************************************************************************
4187 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4188 *
4189 * Control values:
4190 * 0/1 - off / on (use default value of 1000)
4191 *
4192 * Legal timer values are:
4193 * 50,100,250,500,1000,2000,5000,10000
4194 *
4195 * Turning off interrupt moderation will also turn this off.
4196 ************************************************************************/
4848static int
4197static int
4849ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4850{
4851 struct adapter *adapter = (struct adapter *) arg1;
4852 struct ixgbe_hw *hw = &adapter->hw;
4853 u16 reg;
4854
4855 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4856 device_printf(adapter->dev,
4857 "Device has no supported external thermal sensor.\n");
4858 return (ENODEV);
4859 }
4860
4861 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4862 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4863 &reg)) {
4864 device_printf(adapter->dev,
4865 "Error reading from PHY's temperature status register\n");
4866 return (EAGAIN);
4867 }
4868
4869 /* Get occurrence bit */
4870 reg = !!(reg & 0x4000);
4871 return (sysctl_handle_int(oidp, 0, reg, req));
4872}
4873
4874/*
4875** Thermal Shutdown Trigger (internal MAC)
4876** - Set this to 1 to cause an overtemp event to occur
4877*/
4878static int
4879ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4880{
4881 struct adapter *adapter = (struct adapter *) arg1;
4882 struct ixgbe_hw *hw = &adapter->hw;
4883 int error, fire = 0;
4884
4885 error = sysctl_handle_int(oidp, &fire, 0, req);
4886 if ((error) || (req->newptr == NULL))
4887 return (error);
4888
4889 if (fire) {
4890 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4891 reg |= IXGBE_EICR_TS;
4892 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4893 }
4894
4895 return (0);
4896}
4897
4898/*
4899** Manage DMA Coalescing.
4900** Control values:
4901** 0/1 - off / on (use default value of 1000)
4902**
4903** Legal timer values are:
4904** 50,100,250,500,1000,2000,5000,10000
4905**
4906** Turning off interrupt moderation will also turn this off.
4907*/
4908static int
4909ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4910{
4198ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4199{
4911 struct adapter *adapter = (struct adapter *) arg1;
4912 struct ifnet *ifp = adapter->ifp;
4913 int error;
4914 u32 newval;
4200 struct adapter *adapter = (struct adapter *)arg1;
4201 struct ifnet *ifp = adapter->ifp;
4202 int error;
4203 u32 newval;
4915
4916 newval = adapter->dmac;
4917 error = sysctl_handle_int(oidp, &newval, 0, req);
4918 if ((error) || (req->newptr == NULL))
4919 return (error);
4920
4921 switch (newval) {
4922 case 0:

--- 20 unchanged lines hidden (view full) ---

4943 return (EINVAL);
4944 }
4945
4946 /* Re-initialize hardware if it's already running */
4947 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4948 ixgbe_init(adapter);
4949
4950 return (0);
4204
4205 newval = adapter->dmac;
4206 error = sysctl_handle_int(oidp, &newval, 0, req);
4207 if ((error) || (req->newptr == NULL))
4208 return (error);
4209
4210 switch (newval) {
4211 case 0:

--- 20 unchanged lines hidden (view full) ---

4232 return (EINVAL);
4233 }
4234
4235 /* Re-initialize hardware if it's already running */
4236 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4237 ixgbe_init(adapter);
4238
4239 return (0);
4951}
4240} /* ixgbe_sysctl_dmac */
4952
4953#ifdef IXGBE_DEBUG
4241
4242#ifdef IXGBE_DEBUG
4954/**
4955 * Sysctl to test power states
4956 * Values:
4957 * 0 - set device to D0
4958 * 3 - set device to D3
4959 * (none) - get current device power state
4960 */
4243/************************************************************************
4244 * ixgbe_sysctl_power_state
4245 *
4246 * Sysctl to test power states
4247 * Values:
4248 * 0 - set device to D0
4249 * 3 - set device to D3
4250 * (none) - get current device power state
4251 ************************************************************************/
4961static int
4962ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4963{
4252static int
4253ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4254{
4964 struct adapter *adapter = (struct adapter *) arg1;
4965 device_t dev = adapter->dev;
4966 int curr_ps, new_ps, error = 0;
4255 struct adapter *adapter = (struct adapter *)arg1;
4256 device_t dev = adapter->dev;
4257 int curr_ps, new_ps, error = 0;
4967
4968 curr_ps = new_ps = pci_get_powerstate(dev);
4969
4970 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4971 if ((error) || (req->newptr == NULL))
4972 return (error);
4973
4974 if (new_ps == curr_ps)

--- 4 unchanged lines hidden (view full) ---

4979 else if (new_ps == 0 && curr_ps == 3)
4980 error = DEVICE_RESUME(dev);
4981 else
4982 return (EINVAL);
4983
4984 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4985
4986 return (error);
4258
4259 curr_ps = new_ps = pci_get_powerstate(dev);
4260
4261 error = sysctl_handle_int(oidp, &new_ps, 0, req);
4262 if ((error) || (req->newptr == NULL))
4263 return (error);
4264
4265 if (new_ps == curr_ps)

--- 4 unchanged lines hidden (view full) ---

4270 else if (new_ps == 0 && curr_ps == 3)
4271 error = DEVICE_RESUME(dev);
4272 else
4273 return (EINVAL);
4274
4275 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4276
4277 return (error);
4987}
4278} /* ixgbe_sysctl_power_state */
4988#endif
4279#endif
4989/*
4990 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4991 * Values:
4992 * 0 - disabled
4993 * 1 - enabled
4994 */
4280
4281/************************************************************************
4282 * ixgbe_sysctl_wol_enable
4283 *
4284 * Sysctl to enable/disable the WoL capability,
4285 * if supported by the adapter.
4286 *
4287 * Values:
4288 * 0 - disabled
4289 * 1 - enabled
4290 ************************************************************************/
4995static int
4996ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4997{
4291static int
4292ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4293{
4998 struct adapter *adapter = (struct adapter *) arg1;
4294 struct adapter *adapter = (struct adapter *)arg1;
4999 struct ixgbe_hw *hw = &adapter->hw;
4295 struct ixgbe_hw *hw = &adapter->hw;
5000 int new_wol_enabled;
5001 int error = 0;
4296 int new_wol_enabled;
4297 int error = 0;
5002
5003 new_wol_enabled = hw->wol_enabled;
5004 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5005 if ((error) || (req->newptr == NULL))
5006 return (error);
5007 new_wol_enabled = !!(new_wol_enabled);
5008 if (new_wol_enabled == hw->wol_enabled)
5009 return (0);
5010
5011 if (new_wol_enabled > 0 && !adapter->wol_support)
5012 return (ENODEV);
5013 else
5014 hw->wol_enabled = new_wol_enabled;
5015
5016 return (0);
4298
4299 new_wol_enabled = hw->wol_enabled;
4300 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4301 if ((error) || (req->newptr == NULL))
4302 return (error);
4303 new_wol_enabled = !!(new_wol_enabled);
4304 if (new_wol_enabled == hw->wol_enabled)
4305 return (0);
4306
4307 if (new_wol_enabled > 0 && !adapter->wol_support)
4308 return (ENODEV);
4309 else
4310 hw->wol_enabled = new_wol_enabled;
4311
4312 return (0);
5017}
4313} /* ixgbe_sysctl_wol_enable */
5018
4314
5019/*
5020 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
5021 * if supported by the adapter.
5022 * Values:
5023 * 0 - disabled
5024 * 1 - enabled
5025 */
5026static int
5027ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
5028{
5029 struct adapter *adapter = (struct adapter *) arg1;
5030 struct ixgbe_hw *hw = &adapter->hw;
5031 struct ifnet *ifp = adapter->ifp;
5032 int new_eee_enabled, error = 0;
5033
5034 new_eee_enabled = adapter->eee_enabled;
5035 error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
5036 if ((error) || (req->newptr == NULL))
5037 return (error);
5038 new_eee_enabled = !!(new_eee_enabled);
5039 if (new_eee_enabled == adapter->eee_enabled)
5040 return (0);
5041
5042 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
5043 return (ENODEV);
5044 else
5045 adapter->eee_enabled = new_eee_enabled;
5046
5047 /* Re-initialize hardware if it's already running */
5048 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5049 ixgbe_init(adapter);
5050
5051 return (0);
5052}
5053
5054/*
5055 * Read-only sysctl indicating whether EEE support was negotiated
5056 * on the link.
5057 */
5058static int
5059ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
5060{
5061 struct adapter *adapter = (struct adapter *) arg1;
5062 struct ixgbe_hw *hw = &adapter->hw;
5063 bool status;
5064
5065 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
5066
5067 return (sysctl_handle_int(oidp, 0, status, req));
5068}
5069
5070/*
5071 * Read-only sysctl indicating whether RX Link is in LPI state.
5072 */
5073static int
5074ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
5075{
5076 struct adapter *adapter = (struct adapter *) arg1;
5077 struct ixgbe_hw *hw = &adapter->hw;
5078 bool status;
5079
5080 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5081 IXGBE_EEE_RX_LPI_STATUS);
5082
5083 return (sysctl_handle_int(oidp, 0, status, req));
5084}
5085
5086/*
5087 * Read-only sysctl indicating whether TX Link is in LPI state.
5088 */
5089static int
5090ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
5091{
5092 struct adapter *adapter = (struct adapter *) arg1;
5093 struct ixgbe_hw *hw = &adapter->hw;
5094 bool status;
5095
5096 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5097 IXGBE_EEE_TX_LPI_STATUS);
5098
5099 return (sysctl_handle_int(oidp, 0, status, req));
5100}
5101
5102/*
5103 * Read-only sysctl indicating TX Link LPI delay
5104 */
5105static int
5106ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
5107{
5108 struct adapter *adapter = (struct adapter *) arg1;
5109 struct ixgbe_hw *hw = &adapter->hw;
5110 u32 reg;
5111
5112 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
5113
5114 return (sysctl_handle_int(oidp, 0, reg >> 26, req));
5115}
5116
5117/*
5118 * Sysctl to enable/disable the types of packets that the
5119 * adapter will wake up on upon receipt.
5120 * WUFC - Wake Up Filter Control
5121 * Flags:
5122 * 0x1 - Link Status Change
5123 * 0x2 - Magic Packet
5124 * 0x4 - Direct Exact
5125 * 0x8 - Directed Multicast
5126 * 0x10 - Broadcast
5127 * 0x20 - ARP/IPv4 Request Packet
5128 * 0x40 - Direct IPv4 Packet
5129 * 0x80 - Direct IPv6 Packet
4315/************************************************************************
4316 * ixgbe_sysctl_wufc - Wake Up Filter Control
5130 *
4317 *
5131 * Setting another flag will cause the sysctl to return an
5132 * error.
5133 */
4318 * Sysctl to enable/disable the types of packets that the
4319 * adapter will wake up on upon receipt.
4320 * Flags:
4321 * 0x1 - Link Status Change
4322 * 0x2 - Magic Packet
4323 * 0x4 - Direct Exact
4324 * 0x8 - Directed Multicast
4325 * 0x10 - Broadcast
4326 * 0x20 - ARP/IPv4 Request Packet
4327 * 0x40 - Direct IPv4 Packet
4328 * 0x80 - Direct IPv6 Packet
4329 *
4330 * Settings not listed above will cause the sysctl to return an error.
4331 ************************************************************************/
5134static int
5135ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5136{
4332static int
4333ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4334{
5137 struct adapter *adapter = (struct adapter *) arg1;
5138 int error = 0;
5139 u32 new_wufc;
4335 struct adapter *adapter = (struct adapter *)arg1;
4336 int error = 0;
4337 u32 new_wufc;
5140
5141 new_wufc = adapter->wufc;
5142
5143 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5144 if ((error) || (req->newptr == NULL))
5145 return (error);
5146 if (new_wufc == adapter->wufc)
5147 return (0);
5148
5149 if (new_wufc & 0xffffff00)
5150 return (EINVAL);
4338
4339 new_wufc = adapter->wufc;
4340
4341 error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4342 if ((error) || (req->newptr == NULL))
4343 return (error);
4344 if (new_wufc == adapter->wufc)
4345 return (0);
4346
4347 if (new_wufc & 0xffffff00)
4348 return (EINVAL);
5151 else {
5152 new_wufc &= 0xff;
5153 new_wufc |= (0xffffff & adapter->wufc);
5154 adapter->wufc = new_wufc;
5155 }
5156
4349
4350 new_wufc &= 0xff;
4351 new_wufc |= (0xffffff & adapter->wufc);
4352 adapter->wufc = new_wufc;
4353
5157 return (0);
4354 return (0);
5158}
4355} /* ixgbe_sysctl_wufc */
5159
5160#ifdef IXGBE_DEBUG
4356
4357#ifdef IXGBE_DEBUG
4358/************************************************************************
4359 * ixgbe_sysctl_print_rss_config
4360 ************************************************************************/
5161static int
5162ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5163{
4361static int
4362ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4363{
5164 struct adapter *adapter = (struct adapter *)arg1;
4364 struct adapter *adapter = (struct adapter *)arg1;
5165 struct ixgbe_hw *hw = &adapter->hw;
4365 struct ixgbe_hw *hw = &adapter->hw;
5166 device_t dev = adapter->dev;
5167 int error = 0, reta_size;
5168 struct sbuf *buf;
5169 u32 reg;
4366 device_t dev = adapter->dev;
4367 struct sbuf *buf;
4368 int error = 0, reta_size;
4369 u32 reg;
5170
5171 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5172 if (!buf) {
5173 device_printf(dev, "Could not allocate sbuf for output.\n");
5174 return (ENOMEM);
5175 }
5176
5177 // TODO: use sbufs to make a string to print out
5178 /* Set multiplier for RETA setup and table size based on MAC */
5179 switch (adapter->hw.mac.type) {
5180 case ixgbe_mac_X550:
5181 case ixgbe_mac_X550EM_x:
4370
4371 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4372 if (!buf) {
4373 device_printf(dev, "Could not allocate sbuf for output.\n");
4374 return (ENOMEM);
4375 }
4376
4377 // TODO: use sbufs to make a string to print out
4378 /* Set multiplier for RETA setup and table size based on MAC */
4379 switch (adapter->hw.mac.type) {
4380 case ixgbe_mac_X550:
4381 case ixgbe_mac_X550EM_x:
4382 case ixgbe_mac_X550EM_a:
5182 reta_size = 128;
5183 break;
5184 default:
5185 reta_size = 32;
5186 break;
5187 }
5188
5189 /* Print out the redirection table */

--- 10 unchanged lines hidden (view full) ---

5200
5201 // TODO: print more config
5202
5203 error = sbuf_finish(buf);
5204 if (error)
5205 device_printf(dev, "Error finishing sbuf: %d\n", error);
5206
5207 sbuf_delete(buf);
4383 reta_size = 128;
4384 break;
4385 default:
4386 reta_size = 32;
4387 break;
4388 }
4389
4390 /* Print out the redirection table */

--- 10 unchanged lines hidden (view full) ---

4401
4402 // TODO: print more config
4403
4404 error = sbuf_finish(buf);
4405 if (error)
4406 device_printf(dev, "Error finishing sbuf: %d\n", error);
4407
4408 sbuf_delete(buf);
4409
5208 return (0);
4410 return (0);
5209}
4411} /* ixgbe_sysctl_print_rss_config */
5210#endif /* IXGBE_DEBUG */
5211
4412#endif /* IXGBE_DEBUG */
4413
5212/*
5213** Enable the hardware to drop packets when the buffer is
5214** full. This is useful when multiqueue,so that no single
5215** queue being full stalls the entire RX engine. We only
5216** enable this when Multiqueue AND when Flow Control is
5217** disabled.
5218*/
5219static void
5220ixgbe_enable_rx_drop(struct adapter *adapter)
4414/************************************************************************
4415 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4416 *
4417 * For X552/X557-AT devices using an external PHY
4418 ************************************************************************/
4419static int
4420ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
5221{
4421{
5222 struct ixgbe_hw *hw = &adapter->hw;
4422 struct adapter *adapter = (struct adapter *)arg1;
4423 struct ixgbe_hw *hw = &adapter->hw;
4424 u16 reg;
5223
4425
5224 for (int i = 0; i < adapter->num_queues; i++) {
5225 struct rx_ring *rxr = &adapter->rx_rings[i];
5226 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5227 srrctl |= IXGBE_SRRCTL_DROP_EN;
5228 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4426 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4427 device_printf(adapter->dev,
4428 "Device has no supported external thermal sensor.\n");
4429 return (ENODEV);
5229 }
4430 }
5230#ifdef PCI_IOV
5231 /* enable drop for each vf */
5232 for (int i = 0; i < adapter->num_vfs; i++) {
5233 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5234 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5235 IXGBE_QDE_ENABLE));
4431
4432 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4433 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4434 device_printf(adapter->dev,
4435 "Error reading from PHY's current temperature register\n");
4436 return (EAGAIN);
5236 }
4437 }
5237#endif
5238}
5239
4438
5240static void
5241ixgbe_disable_rx_drop(struct adapter *adapter)
5242{
5243 struct ixgbe_hw *hw = &adapter->hw;
4439 /* Shift temp for output */
4440 reg = reg >> 8;
5244
4441
5245 for (int i = 0; i < adapter->num_queues; i++) {
5246 struct rx_ring *rxr = &adapter->rx_rings[i];
5247 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5248 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5249 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5250 }
5251#ifdef PCI_IOV
5252 /* disable drop for each vf */
5253 for (int i = 0; i < adapter->num_vfs; i++) {
5254 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5255 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5256 }
5257#endif
5258}
4442 return (sysctl_handle_int(oidp, NULL, reg, req));
4443} /* ixgbe_sysctl_phy_temp */
5259
4444
5260static void
5261ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4445/************************************************************************
4446 * ixgbe_sysctl_phy_overtemp_occurred
4447 *
4448 * Reports (directly from the PHY) whether the current PHY
4449 * temperature is over the overtemp threshold.
4450 ************************************************************************/
4451static int
4452ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
5262{
4453{
5263 u32 mask;
4454 struct adapter *adapter = (struct adapter *)arg1;
4455 struct ixgbe_hw *hw = &adapter->hw;
4456 u16 reg;
5264
4457
5265 switch (adapter->hw.mac.type) {
5266 case ixgbe_mac_82598EB:
5267 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5268 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5269 break;
5270 case ixgbe_mac_82599EB:
5271 case ixgbe_mac_X540:
5272 case ixgbe_mac_X550:
5273 case ixgbe_mac_X550EM_x:
5274 mask = (queues & 0xFFFFFFFF);
5275 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5276 mask = (queues >> 32);
5277 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5278 break;
5279 default:
5280 break;
4458 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4459 device_printf(adapter->dev,
4460 "Device has no supported external thermal sensor.\n");
4461 return (ENODEV);
5281 }
4462 }
5282}
5283
4463
5284#ifdef PCI_IOV
5285
5286/*
5287** Support functions for SRIOV/VF management
5288*/
5289
5290static void
5291ixgbe_ping_all_vfs(struct adapter *adapter)
5292{
5293 struct ixgbe_vf *vf;
5294
5295 for (int i = 0; i < adapter->num_vfs; i++) {
5296 vf = &adapter->vfs[i];
5297 if (vf->flags & IXGBE_VF_ACTIVE)
5298 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
4464 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4465 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4466 device_printf(adapter->dev,
4467 "Error reading from PHY's temperature status register\n");
4468 return (EAGAIN);
5299 }
4469 }
5300}
5301
4470
4471 /* Get occurrence bit */
4472 reg = !!(reg & 0x4000);
5302
4473
5303static void
5304ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
5305 uint16_t tag)
4474 return (sysctl_handle_int(oidp, 0, reg, req));
4475} /* ixgbe_sysctl_phy_overtemp_occurred */
4476
4477/************************************************************************
4478 * ixgbe_sysctl_eee_state
4479 *
4480 * Sysctl to set EEE power saving feature
4481 * Values:
4482 * 0 - disable EEE
4483 * 1 - enable EEE
4484 * (none) - get current device EEE state
4485 ************************************************************************/
4486static int
4487ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
5306{
4488{
5307 struct ixgbe_hw *hw;
5308 uint32_t vmolr, vmvir;
4489 struct adapter *adapter = (struct adapter *)arg1;
4490 device_t dev = adapter->dev;
4491 int curr_eee, new_eee, error = 0;
4492 s32 retval;
5309
4493
5310 hw = &adapter->hw;
4494 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5311
4495
5312 vf->vlan_tag = tag;
5313
5314 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
4496 error = sysctl_handle_int(oidp, &new_eee, 0, req);
4497 if ((error) || (req->newptr == NULL))
4498 return (error);
5315
4499
5316 /* Do not receive packets that pass inexact filters. */
5317 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
4500 /* Nothing to do */
4501 if (new_eee == curr_eee)
4502 return (0);
5318
4503
5319 /* Disable Multicast Promicuous Mode. */
5320 vmolr &= ~IXGBE_VMOLR_MPE;
4504 /* Not supported */
4505 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4506 return (EINVAL);
5321
4507
5322 /* Accept broadcasts. */
5323 vmolr |= IXGBE_VMOLR_BAM;
4508 /* Bounds checking */
4509 if ((new_eee < 0) || (new_eee > 1))
4510 return (EINVAL);
5324
4511
5325 if (tag == 0) {
5326 /* Accept non-vlan tagged traffic. */
5327 //vmolr |= IXGBE_VMOLR_AUPE;
5328
5329 /* Allow VM to tag outgoing traffic; no default tag. */
5330 vmvir = 0;
5331 } else {
5332 /* Require vlan-tagged traffic. */
5333 vmolr &= ~IXGBE_VMOLR_AUPE;
5334
5335 /* Tag all traffic with provided vlan tag. */
5336 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
4512 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4513 if (retval) {
4514 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4515 return (EINVAL);
5337 }
4516 }
5338 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
5339 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
5340}
5341
4517
4518 /* Restart auto-neg */
4519 ixgbe_init(adapter);
5342
4520
5343static boolean_t
5344ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5345{
4521 device_printf(dev, "New EEE state: %d\n", new_eee);
5346
4522
5347 /*
5348 * Frame size compatibility between PF and VF is only a problem on
5349 * 82599-based cards. X540 and later support any combination of jumbo
5350 * frames on PFs and VFs.
5351 */
5352 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5353 return (TRUE);
4523 /* Cache new value */
4524 if (new_eee)
4525 adapter->feat_en |= IXGBE_FEATURE_EEE;
4526 else
4527 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5354
4528
5355 switch (vf->api_ver) {
5356 case IXGBE_API_VER_1_0:
5357 case IXGBE_API_VER_UNKNOWN:
5358 /*
5359 * On legacy (1.0 and older) VF versions, we don't support jumbo
5360 * frames on either the PF or the VF.
5361 */
5362 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5363 vf->max_frame_size > ETHER_MAX_LEN)
5364 return (FALSE);
4529 return (error);
4530} /* ixgbe_sysctl_eee_state */
5365
4531
5366 return (TRUE);
4532/************************************************************************
4533 * ixgbe_init_device_features
4534 ************************************************************************/
4535static void
4536ixgbe_init_device_features(struct adapter *adapter)
4537{
4538 adapter->feat_cap = IXGBE_FEATURE_NETMAP
4539 | IXGBE_FEATURE_RSS
4540 | IXGBE_FEATURE_MSI
4541 | IXGBE_FEATURE_MSIX
4542 | IXGBE_FEATURE_LEGACY_IRQ
4543 | IXGBE_FEATURE_LEGACY_TX;
5367
4544
4545 /* Set capabilities first... */
4546 switch (adapter->hw.mac.type) {
4547 case ixgbe_mac_82598EB:
4548 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4549 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5368 break;
4550 break;
5369 case IXGBE_API_VER_1_1:
4551 case ixgbe_mac_X540:
4552 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4553 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4554 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4555 (adapter->hw.bus.func == 0))
4556 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4557 break;
4558 case ixgbe_mac_X550:
4559 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4560 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4561 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4562 break;
4563 case ixgbe_mac_X550EM_x:
4564 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4565 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4566 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4567 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4568 break;
4569 case ixgbe_mac_X550EM_a:
4570 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4571 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4572 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4573 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4574 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4575 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4576 adapter->feat_cap |= IXGBE_FEATURE_EEE;
4577 }
4578 break;
4579 case ixgbe_mac_82599EB:
4580 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4581 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4582 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4583 (adapter->hw.bus.func == 0))
4584 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4585 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4586 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4587 break;
5370 default:
4588 default:
5371 /*
5372 * 1.1 or later VF versions always work if they aren't using
5373 * jumbo frames.
5374 */
5375 if (vf->max_frame_size <= ETHER_MAX_LEN)
5376 return (TRUE);
4589 break;
4590 }
5377
4591
5378 /*
5379 * Jumbo frames only work with VFs if the PF is also using jumbo
5380 * frames.
5381 */
5382 if (adapter->max_frame_size <= ETHER_MAX_LEN)
5383 return (TRUE);
4592 /* Enabled by default... */
4593 /* Fan failure detection */
4594 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4595 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4596 /* Netmap */
4597 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4598 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4599 /* EEE */
4600 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4601 adapter->feat_en |= IXGBE_FEATURE_EEE;
4602 /* Thermal Sensor */
4603 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4604 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5384
4605
5385 return (FALSE);
5386
4606 /* Enabled via global sysctl... */
4607 /* Flow Director */
4608 if (ixgbe_enable_fdir) {
4609 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4610 adapter->feat_en |= IXGBE_FEATURE_FDIR;
4611 else
4612 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
5387 }
4613 }
5388}
4614 /* Legacy (single queue) transmit */
4615 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4616 ixgbe_enable_legacy_tx)
4617 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4618 /*
4619 * Message Signal Interrupts - Extended (MSI-X)
4620 * Normal MSI is only enabled if MSI-X calls fail.
4621 */
4622 if (!ixgbe_enable_msix)
4623 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4624 /* Receive-Side Scaling (RSS) */
4625 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4626 adapter->feat_en |= IXGBE_FEATURE_RSS;
5389
4627
4628 /* Disable features with unmet dependencies... */
4629 /* No MSI-X */
4630 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4631 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4632 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4633 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4634 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4635 }
4636} /* ixgbe_init_device_features */
5390
4637
5391static void
5392ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
4638/************************************************************************
4639 * ixgbe_probe - Device identification routine
4640 *
4641 * Determines if the driver should be loaded on
4642 * adapter based on its PCI vendor/device ID.
4643 *
4644 * return BUS_PROBE_DEFAULT on success, positive on failure
4645 ************************************************************************/
4646static int
4647ixgbe_probe(device_t dev)
5393{
4648{
5394 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
4649 ixgbe_vendor_info_t *ent;
5395
4650
5396 // XXX clear multicast addresses
4651 u16 pci_vendor_id = 0;
4652 u16 pci_device_id = 0;
4653 u16 pci_subvendor_id = 0;
4654 u16 pci_subdevice_id = 0;
4655 char adapter_name[256];
5397
4656
5398 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
4657 INIT_DEBUGOUT("ixgbe_probe: begin");
5399
4658
5400 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5401}
4659 pci_vendor_id = pci_get_vendor(dev);
4660 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4661 return (ENXIO);
5402
4662
4663 pci_device_id = pci_get_device(dev);
4664 pci_subvendor_id = pci_get_subvendor(dev);
4665 pci_subdevice_id = pci_get_subdevice(dev);
5403
4666
5404static void
5405ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5406{
5407 struct ixgbe_hw *hw;
5408 uint32_t vf_index, vfte;
4667 ent = ixgbe_vendor_info_array;
4668 while (ent->vendor_id != 0) {
4669 if ((pci_vendor_id == ent->vendor_id) &&
4670 (pci_device_id == ent->device_id) &&
4671 ((pci_subvendor_id == ent->subvendor_id) ||
4672 (ent->subvendor_id == 0)) &&
4673 ((pci_subdevice_id == ent->subdevice_id) ||
4674 (ent->subdevice_id == 0))) {
4675 sprintf(adapter_name, "%s, Version - %s",
4676 ixgbe_strings[ent->index],
4677 ixgbe_driver_version);
4678 device_set_desc_copy(dev, adapter_name);
4679 ++ixgbe_total_ports;
4680 return (BUS_PROBE_DEFAULT);
4681 }
4682 ent++;
4683 }
5409
4684
5410 hw = &adapter->hw;
4685 return (ENXIO);
4686} /* ixgbe_probe */
5411
4687
5412 vf_index = IXGBE_VF_INDEX(vf->pool);
5413 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5414 vfte |= IXGBE_VF_BIT(vf->pool);
5415 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
5416}
5417
4688
5418
5419static void
5420ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
4689/************************************************************************
4690 * ixgbe_ioctl - Ioctl entry point
4691 *
4692 * Called when the user wants to configure the interface.
4693 *
4694 * return 0 on success, positive on failure
4695 ************************************************************************/
4696static int
4697ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5421{
4698{
5422 struct ixgbe_hw *hw;
5423 uint32_t vf_index, vfre;
4699 struct adapter *adapter = ifp->if_softc;
4700 struct ifreq *ifr = (struct ifreq *) data;
4701#if defined(INET) || defined(INET6)
4702 struct ifaddr *ifa = (struct ifaddr *)data;
4703#endif
4704 int error = 0;
4705 bool avoid_reset = FALSE;
5424
4706
5425 hw = &adapter->hw;
5426
5427 vf_index = IXGBE_VF_INDEX(vf->pool);
5428 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5429 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5430 vfre |= IXGBE_VF_BIT(vf->pool);
5431 else
5432 vfre &= ~IXGBE_VF_BIT(vf->pool);
5433 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
5434}
4707 switch (command) {
4708 case SIOCSIFADDR:
4709#ifdef INET
4710 if (ifa->ifa_addr->sa_family == AF_INET)
4711 avoid_reset = TRUE;
4712#endif
4713#ifdef INET6
4714 if (ifa->ifa_addr->sa_family == AF_INET6)
4715 avoid_reset = TRUE;
4716#endif
4717 /*
4718 * Calling init results in link renegotiation,
4719 * so we avoid doing it when possible.
4720 */
4721 if (avoid_reset) {
4722 ifp->if_flags |= IFF_UP;
4723 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4724 ixgbe_init(adapter);
4725#ifdef INET
4726 if (!(ifp->if_flags & IFF_NOARP))
4727 arp_ifinit(ifp, ifa);
4728#endif
4729 } else
4730 error = ether_ioctl(ifp, command, data);
4731 break;
4732 case SIOCSIFMTU:
4733 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4734 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
4735 error = EINVAL;
4736 } else {
4737 IXGBE_CORE_LOCK(adapter);
4738 ifp->if_mtu = ifr->ifr_mtu;
4739 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4740 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4741 ixgbe_init_locked(adapter);
4742 ixgbe_recalculate_max_frame(adapter);
4743 IXGBE_CORE_UNLOCK(adapter);
4744 }
4745 break;
4746 case SIOCSIFFLAGS:
4747 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4748 IXGBE_CORE_LOCK(adapter);
4749 if (ifp->if_flags & IFF_UP) {
4750 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4751 if ((ifp->if_flags ^ adapter->if_flags) &
4752 (IFF_PROMISC | IFF_ALLMULTI)) {
4753 ixgbe_set_promisc(adapter);
4754 }
4755 } else
4756 ixgbe_init_locked(adapter);
4757 } else
4758 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4759 ixgbe_stop(adapter);
4760 adapter->if_flags = ifp->if_flags;
4761 IXGBE_CORE_UNLOCK(adapter);
4762 break;
4763 case SIOCADDMULTI:
4764 case SIOCDELMULTI:
4765 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
4766 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4767 IXGBE_CORE_LOCK(adapter);
4768 ixgbe_disable_intr(adapter);
4769 ixgbe_set_multi(adapter);
4770 ixgbe_enable_intr(adapter);
4771 IXGBE_CORE_UNLOCK(adapter);
4772 }
4773 break;
4774 case SIOCSIFMEDIA:
4775 case SIOCGIFMEDIA:
4776 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4777 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4778 break;
4779 case SIOCSIFCAP:
4780 {
4781 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5435
4782
4783 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5436
4784
5437static void
5438ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5439{
5440 struct ixgbe_hw *hw;
5441 uint32_t ack;
5442 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
4785 if (!mask)
4786 break;
5443
4787
5444 hw = &adapter->hw;
4788 /* HW cannot turn these on/off separately */
4789 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4790 ifp->if_capenable ^= IFCAP_RXCSUM;
4791 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4792 }
4793 if (mask & IFCAP_TXCSUM)
4794 ifp->if_capenable ^= IFCAP_TXCSUM;
4795 if (mask & IFCAP_TXCSUM_IPV6)
4796 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4797 if (mask & IFCAP_TSO4)
4798 ifp->if_capenable ^= IFCAP_TSO4;
4799 if (mask & IFCAP_TSO6)
4800 ifp->if_capenable ^= IFCAP_TSO6;
4801 if (mask & IFCAP_LRO)
4802 ifp->if_capenable ^= IFCAP_LRO;
4803 if (mask & IFCAP_VLAN_HWTAGGING)
4804 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4805 if (mask & IFCAP_VLAN_HWFILTER)
4806 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4807 if (mask & IFCAP_VLAN_HWTSO)
4808 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5445
4809
5446 ixgbe_process_vf_reset(adapter, vf);
4810 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4811 IXGBE_CORE_LOCK(adapter);
4812 ixgbe_init_locked(adapter);
4813 IXGBE_CORE_UNLOCK(adapter);
4814 }
4815 VLAN_CAPABILITIES(ifp);
4816 break;
4817 }
4818#if __FreeBSD_version >= 1100036
4819 case SIOCGI2C:
4820 {
4821 struct ixgbe_hw *hw = &adapter->hw;
4822 struct ifi2creq i2c;
4823 int i;
5447
4824
5448 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5449 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5450 vf->ether_addr, vf->pool, TRUE);
5451 ack = IXGBE_VT_MSGTYPE_ACK;
5452 } else
5453 ack = IXGBE_VT_MSGTYPE_NACK;
4825 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4826 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
4827 if (error != 0)
4828 break;
4829 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4830 error = EINVAL;
4831 break;
4832 }
4833 if (i2c.len > sizeof(i2c.data)) {
4834 error = EINVAL;
4835 break;
4836 }
5454
4837
5455 ixgbe_vf_enable_transmit(adapter, vf);
5456 ixgbe_vf_enable_receive(adapter, vf);
5457
5458 vf->flags |= IXGBE_VF_CTS;
5459
5460 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
5461 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
5462 resp[3] = hw->mac.mc_filter_type;
5463 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
5464}
5465
5466
5467static void
5468ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5469{
5470 uint8_t *mac;
5471
5472 mac = (uint8_t*)&msg[1];
5473
5474 /* Check that the VF has permission to change the MAC address. */
5475 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5476 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5477 return;
4838 for (i = 0; i < i2c.len; i++)
4839 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4840 i2c.dev_addr, &i2c.data[i]);
4841 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
4842 break;
5478 }
4843 }
5479
5480 if (ixgbe_validate_mac_addr(mac) != 0) {
5481 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5482 return;
4844#endif
4845 default:
4846 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4847 error = ether_ioctl(ifp, command, data);
4848 break;
5483 }
5484
4849 }
4850
5485 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
4851 return (error);
4852} /* ixgbe_ioctl */
5486
4853
5487 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5488 vf->pool, TRUE);
5489
5490 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5491}
5492
5493
5494/*
5495** VF multicast addresses are set by using the appropriate bit in
5496** 1 of 128 32 bit addresses (4096 possible).
5497*/
4854/************************************************************************
4855 * ixgbe_check_fan_failure
4856 ************************************************************************/
5498static void
4857static void
5499ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
4858ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
5500{
4859{
5501 u16 *list = (u16*)&msg[1];
5502 int entries;
5503 u32 vmolr, vec_bit, vec_reg, mta_reg;
4860 u32 mask;
5504
4861
5505 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5506 entries = min(entries, IXGBE_MAX_VF_MC);
4862 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4863 IXGBE_ESDP_SDP1;
5507
4864
5508 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
4865 if (reg & mask)
4866 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4867} /* ixgbe_check_fan_failure */
5509
4868
5510 vf->num_mc_hashes = entries;
5511
5512 /* Set the appropriate MTA bit */
5513 for (int i = 0; i < entries; i++) {
5514 vf->mc_hash[i] = list[i];
5515 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5516 vec_bit = vf->mc_hash[i] & 0x1F;
5517 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5518 mta_reg |= (1 << vec_bit);
5519 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5520 }
5521
5522 vmolr |= IXGBE_VMOLR_ROMPE;
5523 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5524 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5525 return;
5526}
5527
5528
4869/************************************************************************
4870 * ixgbe_handle_que
4871 ************************************************************************/
5529static void
4872static void
5530ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
4873ixgbe_handle_que(void *context, int pending)
5531{
4874{
5532 struct ixgbe_hw *hw;
5533 int enable;
5534 uint16_t tag;
4875 struct ix_queue *que = context;
4876 struct adapter *adapter = que->adapter;
4877 struct tx_ring *txr = que->txr;
4878 struct ifnet *ifp = adapter->ifp;
5535
4879
5536 hw = &adapter->hw;
5537 enable = IXGBE_VT_MSGINFO(msg[0]);
5538 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5539
5540 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5541 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5542 return;
4880 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4881 ixgbe_rxeof(que);
4882 IXGBE_TX_LOCK(txr);
4883 ixgbe_txeof(txr);
4884 if (!ixgbe_ring_empty(ifp, txr->br))
4885 ixgbe_start_locked(ifp, txr);
4886 IXGBE_TX_UNLOCK(txr);
5543 }
5544
4887 }
4888
5545 /* It is illegal to enable vlan tag 0. */
5546 if (tag == 0 && enable != 0){
5547 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5548 return;
5549 }
5550
5551 ixgbe_set_vfta(hw, tag, vf->pool, enable);
5552 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5553}
4889 /* Re-enable this interrupt */
4890 if (que->res != NULL)
4891 ixgbe_enable_queue(adapter, que->msix);
4892 else
4893 ixgbe_enable_intr(adapter);
5554
4894
4895 return;
4896} /* ixgbe_handle_que */
5555
4897
5556static void
5557ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5558{
5559 struct ixgbe_hw *hw;
5560 uint32_t vf_max_size, pf_max_size, mhadd;
5561
4898
5562 hw = &adapter->hw;
5563 vf_max_size = msg[1];
5564
4899
5565 if (vf_max_size < ETHER_CRC_LEN) {
5566 /* We intentionally ACK invalid LPE requests. */
5567 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5568 return;
5569 }
4900/************************************************************************
4901 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4902 ************************************************************************/
4903static int
4904ixgbe_allocate_legacy(struct adapter *adapter)
4905{
4906 device_t dev = adapter->dev;
4907 struct ix_queue *que = adapter->queues;
4908 struct tx_ring *txr = adapter->tx_rings;
4909 int error;
5570
4910
5571 vf_max_size -= ETHER_CRC_LEN;
5572
5573 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
5574 /* We intentionally ACK invalid LPE requests. */
5575 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5576 return;
4911 /* We allocate a single interrupt resource */
4912 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4913 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4914 if (adapter->res == NULL) {
4915 device_printf(dev,
4916 "Unable to allocate bus resource: interrupt\n");
4917 return (ENXIO);
5577 }
5578
4918 }
4919
5579 vf->max_frame_size = vf_max_size;
5580 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5581
5582 /*
4920 /*
5583 * We might have to disable reception to this VF if the frame size is
5584 * not compatible with the config on the PF.
4921 * Try allocating a fast interrupt and the associated deferred
4922 * processing contexts.
5585 */
4923 */
5586 ixgbe_vf_enable_receive(adapter, vf);
4924 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
4925 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
4926 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
4927 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
4928 taskqueue_thread_enqueue, &que->tq);
4929 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
4930 device_get_nameunit(adapter->dev));
5587
4931
5588 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
5589 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
4932 /* Tasklets for Link, SFP and Multispeed Fiber */
4933 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
4934 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
4935 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
4936 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
4937 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4938 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
4939 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
4940 taskqueue_thread_enqueue, &adapter->tq);
4941 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
4942 device_get_nameunit(adapter->dev));
5590
4943
5591 if (pf_max_size < adapter->max_frame_size) {
5592 mhadd &= ~IXGBE_MHADD_MFS_MASK;
5593 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
5594 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4944 if ((error = bus_setup_intr(dev, adapter->res,
4945 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
4946 &adapter->tag)) != 0) {
4947 device_printf(dev,
4948 "Failed to register fast interrupt handler: %d\n", error);
4949 taskqueue_free(que->tq);
4950 taskqueue_free(adapter->tq);
4951 que->tq = NULL;
4952 adapter->tq = NULL;
4953
4954 return (error);
5595 }
4955 }
4956 /* For simplicity in the handlers */
4957 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
5596
4958
5597 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5598}
4959 return (0);
4960} /* ixgbe_allocate_legacy */
5599
5600
4961
4962
5601static void
5602ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5603 uint32_t *msg)
4963/************************************************************************
4964 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4965 ************************************************************************/
4966static int
4967ixgbe_allocate_msix(struct adapter *adapter)
5604{
4968{
5605 //XXX implement this
5606 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5607}
4969 device_t dev = adapter->dev;
4970 struct ix_queue *que = adapter->queues;
4971 struct tx_ring *txr = adapter->tx_rings;
4972 int error, rid, vector = 0;
4973 int cpu_id = 0;
4974 unsigned int rss_buckets = 0;
4975 cpuset_t cpu_mask;
5608
4976
5609
5610static void
5611ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5612 uint32_t *msg)
5613{
5614
5615 switch (msg[1]) {
5616 case IXGBE_API_VER_1_0:
5617 case IXGBE_API_VER_1_1:
5618 vf->api_ver = msg[1];
5619 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5620 break;
5621 default:
5622 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5623 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5624 break;
4977 /*
4978 * If we're doing RSS, the number of queues needs to
4979 * match the number of RSS buckets that are configured.
4980 *
4981 * + If there's more queues than RSS buckets, we'll end
4982 * up with queues that get no traffic.
4983 *
4984 * + If there's more RSS buckets than queues, we'll end
4985 * up having multiple RSS buckets map to the same queue,
4986 * so there'll be some contention.
4987 */
4988 rss_buckets = rss_getnumbuckets();
4989 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4990 (adapter->num_queues != rss_buckets)) {
4991 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4992 __func__, adapter->num_queues, rss_buckets);
5625 }
4993 }
5626}
5627
4994
4995 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
4996 rid = vector + 1;
4997 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
4998 RF_SHAREABLE | RF_ACTIVE);
4999 if (que->res == NULL) {
5000 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5001 vector);
5002 return (ENXIO);
5003 }
5004 /* Set the handler function */
5005 error = bus_setup_intr(dev, que->res,
5006 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5007 &que->tag);
5008 if (error) {
5009 que->res = NULL;
5010 device_printf(dev, "Failed to register QUE handler");
5011 return (error);
5012 }
5013#if __FreeBSD_version >= 800504
5014 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5015#endif
5016 que->msix = vector;
5017 adapter->active_queues |= (u64)(1 << que->msix);
5628
5018
5629static void
5630ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5631 uint32_t *msg)
5632{
5633 struct ixgbe_hw *hw;
5634 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5635 int num_queues;
5019 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5020 /*
5021 * The queue ID is used as the RSS layer bucket ID.
5022 * We look up the queue ID -> RSS CPU ID and select
5023 * that.
5024 */
5025 cpu_id = rss_getcpu(i % rss_buckets);
5026 CPU_SETOF(cpu_id, &cpu_mask);
5027 } else {
5028 /*
5029 * Bind the MSI-X vector, and thus the
5030 * rings to the corresponding CPU.
5031 *
5032 * This just happens to match the default RSS
5033 * round-robin bucket -> queue -> CPU allocation.
5034 */
5035 if (adapter->num_queues > 1)
5036 cpu_id = i;
5037 }
5038 if (adapter->num_queues > 1)
5039 bus_bind_intr(dev, que->res, cpu_id);
5040#ifdef IXGBE_DEBUG
5041 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5042 device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5043 cpu_id);
5044 else
5045 device_printf(dev, "Bound queue %d to cpu %d\n", i,
5046 cpu_id);
5047#endif /* IXGBE_DEBUG */
5636
5048
5637 hw = &adapter->hw;
5638
5049
5639 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5640 switch (msg[0]) {
5641 case IXGBE_API_VER_1_0:
5642 case IXGBE_API_VER_UNKNOWN:
5643 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5644 return;
5050 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5051 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5052 txr);
5053 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5054 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5055 taskqueue_thread_enqueue, &que->tq);
5056#if __FreeBSD_version < 1100000
5057 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5058 device_get_nameunit(adapter->dev), i);
5059#else
5060 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5061 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5062 &cpu_mask, "%s (bucket %d)",
5063 device_get_nameunit(adapter->dev), cpu_id);
5064 else
5065 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5066 NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5067 i);
5068#endif
5645 }
5646
5069 }
5070
5647 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5648 IXGBE_VT_MSGTYPE_CTS;
5071 /* and Link */
5072 adapter->link_rid = vector + 1;
5073 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5074 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5075 if (!adapter->res) {
5076 device_printf(dev,
5077 "Unable to allocate bus resource: Link interrupt [%d]\n",
5078 adapter->link_rid);
5079 return (ENXIO);
5080 }
5081 /* Set the link handler function */
5082 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5083 NULL, ixgbe_msix_link, adapter, &adapter->tag);
5084 if (error) {
5085 adapter->res = NULL;
5086 device_printf(dev, "Failed to register LINK handler");
5087 return (error);
5088 }
5089#if __FreeBSD_version >= 800504
5090 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5091#endif
5092 adapter->vector = vector;
5093 /* Tasklets for Link, SFP and Multispeed Fiber */
5094 TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5095 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5096 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5097 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5098 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5099 TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5100 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5101 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5102 adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5103 taskqueue_thread_enqueue, &adapter->tq);
5104 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5105 device_get_nameunit(adapter->dev));
5649
5106
5650 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5651 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5652 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5653 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5654 resp[IXGBE_VF_DEF_QUEUE] = 0;
5107 return (0);
5108} /* ixgbe_allocate_msix */
5655
5109
5656 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
5657}
5658
5659
5660static void
5661ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
5110/************************************************************************
5111 * ixgbe_configure_interrupts
5112 *
5113 * Setup MSI-X, MSI, or legacy interrupts (in that order).
5114 * This will also depend on user settings.
5115 ************************************************************************/
5116static int
5117ixgbe_configure_interrupts(struct adapter *adapter)
5662{
5118{
5663 struct ixgbe_hw *hw;
5664 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
5665 int error;
5119 device_t dev = adapter->dev;
5120 int rid, want, queues, msgs;
5666
5121
5667 hw = &adapter->hw;
5122 /* Default to 1 queue if MSI-X setup fails */
5123 adapter->num_queues = 1;
5668
5124
5669 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
5125 /* Override by tuneable */
5126 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5127 goto msi;
5670
5128
5671 if (error != 0)
5672 return;
5673
5674 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
5675 adapter->ifp->if_xname, msg[0], vf->pool);
5676 if (msg[0] == IXGBE_VF_RESET) {
5677 ixgbe_vf_reset_msg(adapter, vf, msg);
5678 return;
5129 /* First try MSI-X */
5130 msgs = pci_msix_count(dev);
5131 if (msgs == 0)
5132 goto msi;
5133 rid = PCIR_BAR(MSIX_82598_BAR);
5134 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5135 RF_ACTIVE);
5136 if (adapter->msix_mem == NULL) {
5137 rid += 4; /* 82599 maps in higher BAR */
5138 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5139 &rid, RF_ACTIVE);
5679 }
5140 }
5680
5681 if (!(vf->flags & IXGBE_VF_CTS)) {
5682 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5683 return;
5141 if (adapter->msix_mem == NULL) {
5142 /* May not be enabled */
5143 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5144 goto msi;
5684 }
5685
5145 }
5146
5686 switch (msg[0] & IXGBE_VT_MSG_MASK) {
5687 case IXGBE_VF_SET_MAC_ADDR:
5688 ixgbe_vf_set_mac(adapter, vf, msg);
5689 break;
5690 case IXGBE_VF_SET_MULTICAST:
5691 ixgbe_vf_set_mc_addr(adapter, vf, msg);
5692 break;
5693 case IXGBE_VF_SET_VLAN:
5694 ixgbe_vf_set_vlan(adapter, vf, msg);
5695 break;
5696 case IXGBE_VF_SET_LPE:
5697 ixgbe_vf_set_lpe(adapter, vf, msg);
5698 break;
5699 case IXGBE_VF_SET_MACVLAN:
5700 ixgbe_vf_set_macvlan(adapter, vf, msg);
5701 break;
5702 case IXGBE_VF_API_NEGOTIATE:
5703 ixgbe_vf_api_negotiate(adapter, vf, msg);
5704 break;
5705 case IXGBE_VF_GET_QUEUES:
5706 ixgbe_vf_get_queues(adapter, vf, msg);
5707 break;
5708 default:
5709 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5147 /* Figure out a reasonable auto config value */
5148 queues = min(mp_ncpus, msgs - 1);
5149 /* If we're doing RSS, clamp at the number of RSS buckets */
5150 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5151 queues = min(queues, rss_getnumbuckets());
5152 if (ixgbe_num_queues > queues) {
5153 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5154 ixgbe_num_queues = queues;
5710 }
5155 }
5711}
5712
5156
5157 if (ixgbe_num_queues != 0)
5158 queues = ixgbe_num_queues;
5159 /* Set max queues to 8 when autoconfiguring */
5160 else
5161 queues = min(queues, 8);
5713
5162
5714/*
5715 * Tasklet for handling VF -> PF mailbox messages.
5716 */
5717static void
5718ixgbe_handle_mbx(void *context, int pending)
5719{
5720 struct adapter *adapter;
5721 struct ixgbe_hw *hw;
5722 struct ixgbe_vf *vf;
5723 int i;
5163 /* reflect correct sysctl value */
5164 ixgbe_num_queues = queues;
5724
5165
5725 adapter = context;
5726 hw = &adapter->hw;
5727
5728 IXGBE_CORE_LOCK(adapter);
5729 for (i = 0; i < adapter->num_vfs; i++) {
5730 vf = &adapter->vfs[i];
5731
5732 if (vf->flags & IXGBE_VF_ACTIVE) {
5733 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5734 ixgbe_process_vf_reset(adapter, vf);
5735
5736 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5737 ixgbe_process_vf_msg(adapter, vf);
5738
5739 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5740 ixgbe_process_vf_ack(adapter, vf);
5741 }
5166 /*
5167 * Want one vector (RX/TX pair) per queue
5168 * plus an additional for Link.
5169 */
5170 want = queues + 1;
5171 if (msgs >= want)
5172 msgs = want;
5173 else {
5174 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
5175 msgs, want);
5176 goto msi;
5742 }
5177 }
5743 IXGBE_CORE_UNLOCK(adapter);
5744}
5178 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5179 device_printf(adapter->dev,
5180 "Using MSI-X interrupts with %d vectors\n", msgs);
5181 adapter->num_queues = queues;
5182 adapter->feat_en |= IXGBE_FEATURE_MSIX;
5183 return (0);
5184 }
5185 /*
5186 * MSI-X allocation failed or provided us with
5187 * less vectors than needed. Free MSI-X resources
5188 * and we'll try enabling MSI.
5189 */
5190 pci_release_msi(dev);
5745
5191
5192msi:
5193 /* Without MSI-X, some features are no longer supported */
5194 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5195 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5196 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5197 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5746
5198
5747static int
5748ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5749{
5750 struct adapter *adapter;
5751 enum ixgbe_iov_mode mode;
5752
5753 adapter = device_get_softc(dev);
5754 adapter->num_vfs = num_vfs;
5755 mode = ixgbe_get_iov_mode(adapter);
5756
5757 if (num_vfs > ixgbe_max_vfs(mode)) {
5758 adapter->num_vfs = 0;
5759 return (ENOSPC);
5199 if (adapter->msix_mem != NULL) {
5200 bus_release_resource(dev, SYS_RES_MEMORY, rid,
5201 adapter->msix_mem);
5202 adapter->msix_mem = NULL;
5760 }
5203 }
5204 msgs = 1;
5205 if (pci_alloc_msi(dev, &msgs) == 0) {
5206 adapter->feat_en |= IXGBE_FEATURE_MSI;
5207 adapter->link_rid = 1;
5208 device_printf(adapter->dev, "Using an MSI interrupt\n");
5209 return (0);
5210 }
5761
5211
5762 IXGBE_CORE_LOCK(adapter);
5763
5764 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5765 M_NOWAIT | M_ZERO);
5766
5767 if (adapter->vfs == NULL) {
5768 adapter->num_vfs = 0;
5769 IXGBE_CORE_UNLOCK(adapter);
5770 return (ENOMEM);
5212 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5213 device_printf(adapter->dev,
5214 "Device does not support legacy interrupts.\n");
5215 return 1;
5771 }
5772
5216 }
5217
5773 ixgbe_init_locked(adapter);
5218 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5219 adapter->link_rid = 0;
5220 device_printf(adapter->dev, "Using a Legacy interrupt\n");
5774
5221
5775 IXGBE_CORE_UNLOCK(adapter);
5776
5777 return (0);
5222 return (0);
5778}
5223} /* ixgbe_configure_interrupts */
5779
5780
5224
5225
5226/************************************************************************
5227 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5228 *
5229 * Done outside of interrupt context since the driver might sleep
5230 ************************************************************************/
5781static void
5231static void
5782ixgbe_uninit_iov(device_t dev)
5232ixgbe_handle_link(void *context, int pending)
5783{
5233{
5784 struct ixgbe_hw *hw;
5785 struct adapter *adapter;
5786 uint32_t pf_reg, vf_reg;
5234 struct adapter *adapter = context;
5235 struct ixgbe_hw *hw = &adapter->hw;
5787
5236
5788 adapter = device_get_softc(dev);
5789 hw = &adapter->hw;
5237 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5238 ixgbe_update_link_status(adapter);
5790
5239
5791 IXGBE_CORE_LOCK(adapter);
5240 /* Re-enable link interrupts */
5241 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5242} /* ixgbe_handle_link */
5792
5243
5793 /* Enable rx/tx for the PF and disable it for all VFs. */
5794 pf_reg = IXGBE_VF_INDEX(adapter->pool);
5795 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5796 IXGBE_VF_BIT(adapter->pool));
5797 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5798 IXGBE_VF_BIT(adapter->pool));
5799
5800 if (pf_reg == 0)
5801 vf_reg = 1;
5802 else
5803 vf_reg = 0;
5804 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5805 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5806
5807 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5808
5809 free(adapter->vfs, M_IXGBE);
5810 adapter->vfs = NULL;
5811 adapter->num_vfs = 0;
5812
5813 IXGBE_CORE_UNLOCK(adapter);
5814}
5815
5816
5244/************************************************************************
5245 * ixgbe_rearm_queues
5246 ************************************************************************/
5817static void
5247static void
5818ixgbe_initialize_iov(struct adapter *adapter)
5248ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5819{
5249{
5820 struct ixgbe_hw *hw = &adapter->hw;
5821 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5822 enum ixgbe_iov_mode mode;
5823 int i;
5250 u32 mask;
5824
5251
5825 mode = ixgbe_get_iov_mode(adapter);
5826 if (mode == IXGBE_NO_VM)
5827 return;
5828
5829 IXGBE_CORE_LOCK_ASSERT(adapter);
5830
5831 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5832 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5833
5834 switch (mode) {
5835 case IXGBE_64_VM:
5836 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5252 switch (adapter->hw.mac.type) {
5253 case ixgbe_mac_82598EB:
5254 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5255 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5837 break;
5256 break;
5838 case IXGBE_32_VM:
5839 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5257 case ixgbe_mac_82599EB:
5258 case ixgbe_mac_X540:
5259 case ixgbe_mac_X550:
5260 case ixgbe_mac_X550EM_x:
5261 case ixgbe_mac_X550EM_a:
5262 mask = (queues & 0xFFFFFFFF);
5263 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5264 mask = (queues >> 32);
5265 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5840 break;
5841 default:
5266 break;
5267 default:
5842 panic("Unexpected SR-IOV mode %d", mode);
5843 }
5844 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5845
5846 mtqc = IXGBE_MTQC_VT_ENA;
5847 switch (mode) {
5848 case IXGBE_64_VM:
5849 mtqc |= IXGBE_MTQC_64VF;
5850 break;
5268 break;
5851 case IXGBE_32_VM:
5852 mtqc |= IXGBE_MTQC_32VF;
5853 break;
5854 default:
5855 panic("Unexpected SR-IOV mode %d", mode);
5856 }
5269 }
5857 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5858
5270} /* ixgbe_rearm_queues */
5859
5271
5860 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5861 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5862 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5863 switch (mode) {
5864 case IXGBE_64_VM:
5865 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5866 break;
5867 case IXGBE_32_VM:
5868 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5869 break;
5870 default:
5871 panic("Unexpected SR-IOV mode %d", mode);
5872 }
5873 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5874
5875
5876 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5877 gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
5878 switch (mode) {
5879 case IXGBE_64_VM:
5880 gpie |= IXGBE_GPIE_VTMODE_64;
5881 break;
5882 case IXGBE_32_VM:
5883 gpie |= IXGBE_GPIE_VTMODE_32;
5884 break;
5885 default:
5886 panic("Unexpected SR-IOV mode %d", mode);
5887 }
5888 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5889
5890 /* Enable rx/tx for the PF. */
5891 vf_reg = IXGBE_VF_INDEX(adapter->pool);
5892 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5893 IXGBE_VF_BIT(adapter->pool));
5894 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5895 IXGBE_VF_BIT(adapter->pool));
5896
5897 /* Allow VM-to-VM communication. */
5898 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5899
5900 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5901 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5902 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5903
5904 for (i = 0; i < adapter->num_vfs; i++)
5905 ixgbe_init_vf(adapter, &adapter->vfs[i]);
5906}
5907
5908
5909/*
5910** Check the max frame setting of all active VF's
5911*/
5912static void
5913ixgbe_recalculate_max_frame(struct adapter *adapter)
5914{
5915 struct ixgbe_vf *vf;
5916
5917 IXGBE_CORE_LOCK_ASSERT(adapter);
5918
5919 for (int i = 0; i < adapter->num_vfs; i++) {
5920 vf = &adapter->vfs[i];
5921 if (vf->flags & IXGBE_VF_ACTIVE)
5922 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5923 }
5924}
5925
5926
5927static void
5928ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
5929{
5930 struct ixgbe_hw *hw;
5931 uint32_t vf_index, pfmbimr;
5932
5933 IXGBE_CORE_LOCK_ASSERT(adapter);
5934
5935 hw = &adapter->hw;
5936
5937 if (!(vf->flags & IXGBE_VF_ACTIVE))
5938 return;
5939
5940 vf_index = IXGBE_VF_INDEX(vf->pool);
5941 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
5942 pfmbimr |= IXGBE_VF_BIT(vf->pool);
5943 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
5944
5945 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
5946
5947 // XXX multicast addresses
5948
5949 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
5950 ixgbe_set_rar(&adapter->hw, vf->rar_index,
5951 vf->ether_addr, vf->pool, TRUE);
5952 }
5953
5954 ixgbe_vf_enable_transmit(adapter, vf);
5955 ixgbe_vf_enable_receive(adapter, vf);
5956
5957 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5958}
5959
5960static int
5961ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5962{
5963 struct adapter *adapter;
5964 struct ixgbe_vf *vf;
5965 const void *mac;
5966
5967 adapter = device_get_softc(dev);
5968
5969 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5970 vfnum, adapter->num_vfs));
5971
5972 IXGBE_CORE_LOCK(adapter);
5973 vf = &adapter->vfs[vfnum];
5974 vf->pool= vfnum;
5975
5976 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5977 vf->rar_index = vfnum + 1;
5978 vf->default_vlan = 0;
5979 vf->max_frame_size = ETHER_MAX_LEN;
5980 ixgbe_update_max_frame(adapter, vf->max_frame_size);
5981
5982 if (nvlist_exists_binary(config, "mac-addr")) {
5983 mac = nvlist_get_binary(config, "mac-addr", NULL);
5984 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5985 if (nvlist_get_bool(config, "allow-set-mac"))
5986 vf->flags |= IXGBE_VF_CAP_MAC;
5987 } else
5988 /*
5989 * If the administrator has not specified a MAC address then
5990 * we must allow the VF to choose one.
5991 */
5992 vf->flags |= IXGBE_VF_CAP_MAC;
5993
5994 vf->flags = IXGBE_VF_ACTIVE;
5995
5996 ixgbe_init_vf(adapter, vf);
5997 IXGBE_CORE_UNLOCK(adapter);
5998
5999 return (0);
6000}
6001#endif /* PCI_IOV */
6002