3c3
< Copyright (c) 2001-2015, Intel Corporation
---
> Copyright (c) 2001-2017, Intel Corporation
5,6c5,6
<
< Redistribution and use in source and binary forms, with or without
---
>
> Redistribution and use in source and binary forms, with or without
8,9c8,9
<
< 1. Redistributions of source code must retain the above copyright notice,
---
>
> 1. Redistributions of source code must retain the above copyright notice,
11,13c11,13
<
< 2. Redistributions in binary form must reproduce the above copyright
< notice, this list of conditions and the following disclaimer in the
---
>
> 2. Redistributions in binary form must reproduce the above copyright
> notice, this list of conditions and the following disclaimer in the
15,17c15,17
<
< 3. Neither the name of the Intel Corporation nor the names of its
< contributors may be used to endorse or promote products derived from
---
>
> 3. Neither the name of the Intel Corporation nor the names of its
> contributors may be used to endorse or promote products derived from
19c19
<
---
>
21,28c21,28
< AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
< IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
< ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
< LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
< CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
< SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
< INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
< CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
---
> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33c33
< /*$FreeBSD: stable/11/sys/dev/ixgbe/if_ix.c 313388 2017-02-07 15:13:19Z rstone $*/
---
> /*$FreeBSD: stable/11/sys/dev/ixgbe/if_ix.c 320897 2017-07-11 21:25:07Z erj $*/
44,47c44,47
< #ifdef RSS
< #include <net/rss_config.h>
< #include <netinet/in_rss.h>
< #endif
---
> /************************************************************************
> * Driver version
> ************************************************************************/
> char ixgbe_driver_version[] = "3.2.12-k";
49,52d48
< /*********************************************************************
< * Driver version
< *********************************************************************/
< char ixgbe_driver_version[] = "3.1.13-k";
54,56c50,51
<
< /*********************************************************************
< * PCI Device ID Table
---
> /************************************************************************
> * PCI Device ID Table
58,60c53,55
< * Used by probe to select devices to load on
< * Last field stores an index into ixgbe_strings
< * Last entry must be all 0s
---
> * Used by probe to select devices to load on
> * Last field stores an index into ixgbe_strings
> * Last entry must be all 0s
62,64c57,58
< * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
< *********************************************************************/
<
---
> * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
> ************************************************************************/
97a92
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
98a94,104
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
> {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
103,106c109,111
< /*********************************************************************
< * Table of branding strings
< *********************************************************************/
<
---
> /************************************************************************
> * Table of branding strings
> ************************************************************************/
111,113c116,118
< /*********************************************************************
< * Function prototypes
< *********************************************************************/
---
> /************************************************************************
> * Function prototypes
> ************************************************************************/
118,119c123,124
< static int ixgbe_suspend(device_t);
< static int ixgbe_resume(device_t);
---
> static int ixgbe_suspend(device_t);
> static int ixgbe_resume(device_t);
121,122c126,127
< static void ixgbe_init(void *);
< static void ixgbe_init_locked(struct adapter *);
---
> static void ixgbe_init(void *);
> static void ixgbe_init_locked(struct adapter *);
125c130
< static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
---
> static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
127c132,134
< static void ixgbe_add_media_types(struct adapter *);
---
> static void ixgbe_init_device_features(struct adapter *);
> static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
> static void ixgbe_add_media_types(struct adapter *);
130d136
< static void ixgbe_identify_hardware(struct adapter *);
132c138
< static void ixgbe_get_slot_info(struct adapter *);
---
> static void ixgbe_get_slot_info(struct adapter *);
135,145c141,151
< static int ixgbe_setup_msix(struct adapter *);
< static void ixgbe_free_pci_resources(struct adapter *);
< static void ixgbe_local_timer(void *);
< static int ixgbe_setup_interface(device_t, struct adapter *);
< static void ixgbe_config_gpie(struct adapter *);
< static void ixgbe_config_dmac(struct adapter *);
< static void ixgbe_config_delay_values(struct adapter *);
< static void ixgbe_config_link(struct adapter *);
< static void ixgbe_check_wol_support(struct adapter *);
< static int ixgbe_setup_low_power_mode(struct adapter *);
< static void ixgbe_rearm_queues(struct adapter *, u64);
---
> static int ixgbe_configure_interrupts(struct adapter *);
> static void ixgbe_free_pci_resources(struct adapter *);
> static void ixgbe_local_timer(void *);
> static int ixgbe_setup_interface(device_t, struct adapter *);
> static void ixgbe_config_gpie(struct adapter *);
> static void ixgbe_config_dmac(struct adapter *);
> static void ixgbe_config_delay_values(struct adapter *);
> static void ixgbe_config_link(struct adapter *);
> static void ixgbe_check_wol_support(struct adapter *);
> static int ixgbe_setup_low_power_mode(struct adapter *);
> static void ixgbe_rearm_queues(struct adapter *, u64);
149,151c155,157
< static void ixgbe_enable_rx_drop(struct adapter *);
< static void ixgbe_disable_rx_drop(struct adapter *);
< static void ixgbe_initialize_rss_mapping(struct adapter *);
---
> static void ixgbe_enable_rx_drop(struct adapter *);
> static void ixgbe_disable_rx_drop(struct adapter *);
> static void ixgbe_initialize_rss_mapping(struct adapter *);
159,161c165,167
< static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
< static void ixgbe_configure_ivars(struct adapter *);
< static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
---
> static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
> static void ixgbe_configure_ivars(struct adapter *);
> static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
163,165c169,171
< static void ixgbe_setup_vlan_hw_support(struct adapter *);
< static void ixgbe_register_vlan(void *, struct ifnet *, u16);
< static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
---
> static void ixgbe_setup_vlan_hw_support(struct adapter *);
> static void ixgbe_register_vlan(void *, struct ifnet *, u16);
> static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
167c173
< static void ixgbe_add_device_sysctls(struct adapter *);
---
> static void ixgbe_add_device_sysctls(struct adapter *);
169,170c175,177
< static int ixgbe_set_flowcntl(struct adapter *, int);
< static int ixgbe_set_advertise(struct adapter *, int);
---
> static int ixgbe_set_flowcntl(struct adapter *, int);
> static int ixgbe_set_advertise(struct adapter *, int);
> static int ixgbe_get_advertise(struct adapter *);
173,180c180,187
< static void ixgbe_set_sysctl_value(struct adapter *, const char *,
< const char *, int *, int);
< static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
---
> static void ixgbe_set_sysctl_value(struct adapter *, const char *,
> const char *, int *, int);
> static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
182,183c189,190
< static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
---
> static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
185,191c192,198
< static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
< static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);
---
> static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
> static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
194,195c201
< static bool ixgbe_sfp_probe(struct adapter *);
< static void ixgbe_setup_optics(struct adapter *);
---
> static bool ixgbe_sfp_probe(struct adapter *);
197,198c203,204
< /* Legacy (single vector interrupt handler */
< static void ixgbe_legacy_irq(void *);
---
> /* Legacy (single vector) interrupt handler */
> static void ixgbe_legacy_irq(void *);
200,202c206,208
< /* The MSI/X Interrupt handlers */
< static void ixgbe_msix_que(void *);
< static void ixgbe_msix_link(void *);
---
> /* The MSI/MSI-X Interrupt handlers */
> static void ixgbe_msix_que(void *);
> static void ixgbe_msix_link(void *);
205,209c211,215
< static void ixgbe_handle_que(void *, int);
< static void ixgbe_handle_link(void *, int);
< static void ixgbe_handle_msf(void *, int);
< static void ixgbe_handle_mod(void *, int);
< static void ixgbe_handle_phy(void *, int);
---
> static void ixgbe_handle_que(void *, int);
> static void ixgbe_handle_link(void *, int);
> static void ixgbe_handle_msf(void *, int);
> static void ixgbe_handle_mod(void *, int);
> static void ixgbe_handle_phy(void *, int);
211,213d216
< #ifdef IXGBE_FDIR
< static void ixgbe_reinit_fdir(void *, int);
< #endif
215,227c218
< #ifdef PCI_IOV
< static void ixgbe_ping_all_vfs(struct adapter *);
< static void ixgbe_handle_mbx(void *, int);
< static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
< static void ixgbe_uninit_iov(device_t);
< static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
< static void ixgbe_initialize_iov(struct adapter *);
< static void ixgbe_recalculate_max_frame(struct adapter *);
< static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
< #endif /* PCI_IOV */
<
<
< /*********************************************************************
---
> /************************************************************************
229,230c220
< *********************************************************************/
<
---
> ************************************************************************/
256d245
< #ifdef DEV_NETMAP
258d246
< #endif /* DEV_NETMAP */
261,262c249,250
< ** TUNEABLE PARAMETERS:
< */
---
> * TUNEABLE PARAMETERS:
> */
264,265c252
< static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
< "IXGBE driver parameters");
---
> static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
268,272c255,259
< ** AIM: Adaptive Interrupt Moderation
< ** which means that the interrupt rate
< ** is varied over time based on the
< ** traffic for that interrupt vector
< */
---
> * AIM: Adaptive Interrupt Moderation
> * which means that the interrupt rate
> * is varied over time based on the
> * traffic for that interrupt vector
> */
274c261
< SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
---
> SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
284,286c271
< &ixgbe_rx_process_limit, 0,
< "Maximum number of received packets to process at a time,"
< "-1 means unlimited");
---
> &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
292,293c277
< "Maximum number of sent packets to process at a time,"
< "-1 means unlimited");
---
> "Maximum number of sent packets to process at a time, -1 means unlimited");
306,311c290,295
< ** Smart speed setting, default to on
< ** this only works as a compile option
< ** right now as its during attach, set
< ** this to 'ixgbe_smart_speed_off' to
< ** disable.
< */
---
> * Smart speed setting, default to on
> * this only works as a compile option
> * right now as its during attach, set
> * this to 'ixgbe_smart_speed_off' to
> * disable.
> */
315c299
< * MSIX should be the default for best performance,
---
> * MSI-X should be the default for best performance,
333,336c317,320
< ** Number of TX descriptors per ring,
< ** setting higher than RX as this seems
< ** the better performing choice.
< */
---
> * Number of TX descriptors per ring,
> * setting higher than RX as this seems
> * the better performing choice.
> */
347,350c331,334
< ** Defining this on will allow the use
< ** of unsupported SFP+ modules, note that
< ** doing so you are on your own :)
< */
---
> * Defining this on will allow the use
> * of unsupported SFP+ modules, note that
> * doing so you are on your own :)
> */
352c336,338
< TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
---
> SYSCTL_INT(_hw_ix, OID_AUTO, allow_unsupported_sfp, CTLFLAG_RDTUN,
> &allow_unsupported_sfp, 0,
> "Allow unsupported SFP modules...use at your own risk");
353a340,357
> /*
> * Not sure if Flow Director is fully baked,
> * so we'll default to turning it off.
> */
> static int ixgbe_enable_fdir = 0;
> SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
> "Enable Flow Director");
>
> /* Legacy Transmit (single queue) */
> static int ixgbe_enable_legacy_tx = 0;
> SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
> &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
>
> /* Receive-Side Scaling */
> static int ixgbe_enable_rss = 1;
> SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
> "Enable Receive-Side Scaling (RSS)");
>
357,366c361,362
< #ifdef IXGBE_FDIR
< /*
< ** Flow Director actually 'steals'
< ** part of the packet buffer as its
< ** filter pool, this variable controls
< ** how much it uses:
< ** 0 = 64K, 1 = 128K, 2 = 256K
< */
< static int fdir_pballoc = 1;
< #endif
---
> static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
> static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
367a364,517
> MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
>
> /************************************************************************
> * ixgbe_initialize_rss_mapping
> ************************************************************************/
> static void
> ixgbe_initialize_rss_mapping(struct adapter *adapter)
> {
> struct ixgbe_hw *hw = &adapter->hw;
> u32 reta = 0, mrqc, rss_key[10];
> int queue_id, table_size, index_mult;
> int i, j;
> u32 rss_hash_config;
>
> if (adapter->feat_en & IXGBE_FEATURE_RSS) {
> /* Fetch the configured RSS key */
> rss_getkey((uint8_t *)&rss_key);
> } else {
> /* set up random bits */
> arc4rand(&rss_key, sizeof(rss_key), 0);
> }
>
> /* Set multiplier for RETA setup and table size based on MAC */
> index_mult = 0x1;
> table_size = 128;
> switch (adapter->hw.mac.type) {
> case ixgbe_mac_82598EB:
> index_mult = 0x11;
> break;
> case ixgbe_mac_X550:
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> table_size = 512;
> break;
> default:
> break;
> }
>
> /* Set up the redirection table */
> for (i = 0, j = 0; i < table_size; i++, j++) {
> if (j == adapter->num_queues)
> j = 0;
>
> if (adapter->feat_en & IXGBE_FEATURE_RSS) {
> /*
> * Fetch the RSS bucket id for the given indirection
> * entry. Cap it at the number of configured buckets
> * (which is num_queues.)
> */
> queue_id = rss_get_indirection_to_bucket(i);
> queue_id = queue_id % adapter->num_queues;
> } else
> queue_id = (j * index_mult);
>
> /*
> * The low 8 bits are for hash value (n+0);
> * The next 8 bits are for hash value (n+1), etc.
> */
> reta = reta >> 8;
> reta = reta | (((uint32_t)queue_id) << 24);
> if ((i & 3) == 3) {
> if (i < 128)
> IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
> else
> IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
> reta);
> reta = 0;
> }
> }
>
> /* Now fill our hash function seeds */
> for (i = 0; i < 10; i++)
> IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
>
> /* Perform hash on these packet types */
> if (adapter->feat_en & IXGBE_FEATURE_RSS)
> rss_hash_config = rss_gethashconfig();
> else {
> /*
> * Disable UDP - IP fragments aren't currently being handled
> * and so we end up with a mix of 2-tuple and 4-tuple
> * traffic.
> */
> rss_hash_config = RSS_HASHTYPE_RSS_IPV4
> | RSS_HASHTYPE_RSS_TCP_IPV4
> | RSS_HASHTYPE_RSS_IPV6
> | RSS_HASHTYPE_RSS_TCP_IPV6
> | RSS_HASHTYPE_RSS_IPV6_EX
> | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
> }
>
> mrqc = IXGBE_MRQC_RSSEN;
> if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
> if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
> if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
> if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
> if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
> if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
> if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
> if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
> device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
> __func__);
> if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
> if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
> mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
> mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
> IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
> } /* ixgbe_initialize_rss_mapping */
>
> /************************************************************************
> * ixgbe_initialize_receive_units - Setup receive registers and features.
> ************************************************************************/
> #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
>
> static void
> ixgbe_initialize_receive_units(struct adapter *adapter)
> {
> struct rx_ring *rxr = adapter->rx_rings;
> struct ixgbe_hw *hw = &adapter->hw;
> struct ifnet *ifp = adapter->ifp;
> int i, j;
> u32 bufsz, fctrl, srrctl, rxcsum;
> u32 hlreg;
>
> /*
> * Make sure receives are disabled while
> * setting up the descriptor ring
> */
> ixgbe_disable_rx(hw);
>
> /* Enable broadcasts */
> fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
> fctrl |= IXGBE_FCTRL_BAM;
> if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
> fctrl |= IXGBE_FCTRL_DPF;
> fctrl |= IXGBE_FCTRL_PMCF;
> }
> IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
>
> /* Set for Jumbo Frames? */
> hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
> if (ifp->if_mtu > ETHERMTU)
> hlreg |= IXGBE_HLREG0_JUMBOEN;
> else
> hlreg &= ~IXGBE_HLREG0_JUMBOEN;
>
369,377c519,524
< /*
< * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
< * be a reference on how to implement netmap support in a driver.
< * Additional comments are in ixgbe_netmap.h .
< *
< * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
< * that extend the standard driver.
< */
< #include <dev/netmap/ixgbe_netmap.h>
---
> /* CRC stripping is conditional in Netmap */
> if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
> (ifp->if_capenable & IFCAP_NETMAP) &&
> !ix_crcstrip)
> hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
> else
378a526
> hlreg |= IXGBE_HLREG0_RXCRCSTRP;
380c528
< static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
---
> IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
382,389c530,531
< /*********************************************************************
< * Device identification routine
< *
< * ixgbe_probe determines if the driver should be loaded on
< * adapter based on PCI vendor/device id of the adapter.
< *
< * return BUS_PROBE_DEFAULT on success, positive on failure
< *********************************************************************/
---
> bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
391,394c533,535
< static int
< ixgbe_probe(device_t dev)
< {
< ixgbe_vendor_info_t *ent;
---
> for (i = 0; i < adapter->num_queues; i++, rxr++) {
> u64 rdba = rxr->rxdma.dma_paddr;
> j = rxr->me;
396,400c537,542
< u16 pci_vendor_id = 0;
< u16 pci_device_id = 0;
< u16 pci_subvendor_id = 0;
< u16 pci_subdevice_id = 0;
< char adapter_name[256];
---
> /* Setup the Base and Length of the Rx Descriptor Ring */
> IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
> (rdba & 0x00000000ffffffffULL));
> IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
> IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
> adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
402c544,549
< INIT_DEBUGOUT("ixgbe_probe: begin");
---
> /* Set up the SRRCTL register */
> srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
> srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
> srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
> srrctl |= bufsz;
> srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
404,406c551,562
< pci_vendor_id = pci_get_vendor(dev);
< if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
< return (ENXIO);
---
> /*
> * Set DROP_EN iff we have no flow control and >1 queue.
> * Note that srrctl was cleared shortly before during reset,
> * so we do not need to clear the bit, but do it just in case
> * this code is moved elsewhere.
> */
> if (adapter->num_queues > 1 &&
> adapter->hw.fc.requested_mode == ixgbe_fc_none) {
> srrctl |= IXGBE_SRRCTL_DROP_EN;
> } else {
> srrctl &= ~IXGBE_SRRCTL_DROP_EN;
> }
408,410c564
< pci_device_id = pci_get_device(dev);
< pci_subvendor_id = pci_get_subvendor(dev);
< pci_subdevice_id = pci_get_subdevice(dev);
---
> IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
412,415c566,568
< ent = ixgbe_vendor_info_array;
< while (ent->vendor_id != 0) {
< if ((pci_vendor_id == ent->vendor_id) &&
< (pci_device_id == ent->device_id) &&
---
> /* Setup the HW Rx Head and Tail Descriptor Pointers */
> IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
> IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
417,418c570,572
< ((pci_subvendor_id == ent->subvendor_id) ||
< (ent->subvendor_id == 0)) &&
---
> /* Set the driver rx tail address */
> rxr->tail = IXGBE_RDT(rxr->me);
> }
420,427c574,643
< ((pci_subdevice_id == ent->subdevice_id) ||
< (ent->subdevice_id == 0))) {
< sprintf(adapter_name, "%s, Version - %s",
< ixgbe_strings[ent->index],
< ixgbe_driver_version);
< device_set_desc_copy(dev, adapter_name);
< ++ixgbe_total_ports;
< return (BUS_PROBE_DEFAULT);
---
> if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
> u32 psrtype = IXGBE_PSRTYPE_TCPHDR
> | IXGBE_PSRTYPE_UDPHDR
> | IXGBE_PSRTYPE_IPV4HDR
> | IXGBE_PSRTYPE_IPV6HDR;
> IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
> }
>
> rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
>
> ixgbe_initialize_rss_mapping(adapter);
>
> if (adapter->num_queues > 1) {
> /* RSS and RX IPP Checksum are mutually exclusive */
> rxcsum |= IXGBE_RXCSUM_PCSD;
> }
>
> if (ifp->if_capenable & IFCAP_RXCSUM)
> rxcsum |= IXGBE_RXCSUM_PCSD;
>
> /* This is useful for calculating UDP/IP fragment checksums */
> if (!(rxcsum & IXGBE_RXCSUM_PCSD))
> rxcsum |= IXGBE_RXCSUM_IPPCSE;
>
> IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
>
> return;
> } /* ixgbe_initialize_receive_units */
>
> /************************************************************************
> * ixgbe_initialize_transmit_units - Enable transmit units.
> ************************************************************************/
> static void
> ixgbe_initialize_transmit_units(struct adapter *adapter)
> {
> struct tx_ring *txr = adapter->tx_rings;
> struct ixgbe_hw *hw = &adapter->hw;
>
> /* Setup the Base and Length of the Tx Descriptor Ring */
> for (int i = 0; i < adapter->num_queues; i++, txr++) {
> u64 tdba = txr->txdma.dma_paddr;
> u32 txctrl = 0;
> int j = txr->me;
>
> IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
> (tdba & 0x00000000ffffffffULL));
> IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
> IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
> adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
>
> /* Setup the HW Tx Head and Tail descriptor pointers */
> IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
> IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
>
> /* Cache the tail address */
> txr->tail = IXGBE_TDT(j);
>
> /* Disable Head Writeback */
> /*
> * Note: for X550 series devices, these registers are actually
> * prefixed with TPH_ instead of DCA_, but the addresses and
> * fields remain the same.
> */
> switch (hw->mac.type) {
> case ixgbe_mac_82598EB:
> txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
> break;
> default:
> txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
> break;
429c645,654
< ent++;
---
> txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
> switch (hw->mac.type) {
> case ixgbe_mac_82598EB:
> IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
> break;
> default:
> IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
> break;
> }
>
431,432d655
< return (ENXIO);
< }
434,435c657,677
< /*********************************************************************
< * Device initialization routine
---
> if (hw->mac.type != ixgbe_mac_82598EB) {
> u32 dmatxctl, rttdcs;
>
> dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
> dmatxctl |= IXGBE_DMATXCTL_TE;
> IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
> /* Disable arbiter to set MTQC */
> rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
> rttdcs |= IXGBE_RTTDCS_ARBDIS;
> IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
> IXGBE_WRITE_REG(hw, IXGBE_MTQC,
> ixgbe_get_mtqc(adapter->iov_mode));
> rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
> IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
> }
>
> return;
> } /* ixgbe_initialize_transmit_units */
>
> /************************************************************************
> * ixgbe_attach - Device initialization routine
437,439c679,681
< * The attach entry point is called when the driver is being loaded.
< * This routine identifies the type of hardware, allocates all resources
< * and initializes the hardware.
---
> * Called when the driver is being loaded.
> * Identifies the type of hardware, allocates all resources
> * and initializes the hardware.
441,443c683,684
< * return 0 on success, positive on failure
< *********************************************************************/
<
---
> * return 0 on success, positive on failure
> ************************************************************************/
447c688
< struct adapter *adapter;
---
> struct adapter *adapter;
450,451c691
< u16 csum;
< u32 ctrl_ext;
---
> u32 ctrl_ext;
456a697
> adapter->hw.back = adapter;
460,464d700
< #ifdef DEV_NETMAP
< adapter->init_locked = ixgbe_init_locked;
< adapter->stop_locked = ixgbe_stop;
< #endif
<
472c708,712
< ixgbe_identify_hardware(adapter);
---
> hw->vendor_id = pci_get_vendor(dev);
> hw->device_id = pci_get_device(dev);
> hw->revision_id = pci_get_revid(dev);
> hw->subsystem_vendor_id = pci_get_subvendor(dev);
> hw->subsystem_device_id = pci_get_subdevice(dev);
473a714,718
> /*
> * Make sure BUSMASTER is set
> */
> pci_enable_busmaster(dev);
>
480a726,792
> /* let hardware know driver is loaded */
> ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
> ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
> IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
>
> /*
> * Initialize the shared code
> */
> if (ixgbe_init_shared_code(hw)) {
> device_printf(dev, "Unable to initialize the shared code\n");
> error = ENXIO;
> goto err_out;
> }
>
> if (hw->mbx.ops.init_params)
> hw->mbx.ops.init_params(hw);
>
> hw->allow_unsupported_sfp = allow_unsupported_sfp;
>
> /* Pick up the 82599 settings */
> if (hw->mac.type != ixgbe_mac_82598EB) {
> hw->phy.smart_speed = ixgbe_smart_speed;
> adapter->num_segs = IXGBE_82599_SCATTER;
> } else
> adapter->num_segs = IXGBE_82598_SCATTER;
>
> ixgbe_init_device_features(adapter);
>
> if (ixgbe_configure_interrupts(adapter)) {
> error = ENXIO;
> goto err_out;
> }
>
> /* Allocate multicast array memory. */
> adapter->mta = malloc(sizeof(*adapter->mta) *
> MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
> if (adapter->mta == NULL) {
> device_printf(dev, "Can not allocate multicast setup array\n");
> error = ENOMEM;
> goto err_out;
> }
>
> /* Enable WoL (if supported) */
> ixgbe_check_wol_support(adapter);
>
> /* Register for VLAN events */
> adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
> ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
> adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
> ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
>
> /* Verify adapter fan is still functional (if applicable) */
> if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
> u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
> ixgbe_check_fan_failure(adapter, esdp, FALSE);
> }
>
> /* Ensure SW/FW semaphore is free */
> ixgbe_init_swfw_semaphore(hw);
>
> /* Enable EEE power saving */
> if (adapter->feat_en & IXGBE_FEATURE_EEE)
> hw->mac.ops.setup_eee(hw, TRUE);
>
> /* Set an initial default flow control value */
> hw->fc.requested_mode = ixgbe_flow_control;
>
488c800
< &adapter->tx_process_limit, ixgbe_tx_process_limit);
---
> &adapter->tx_process_limit, ixgbe_tx_process_limit);
499,502c811,814
< ** With many RX rings it is easy to exceed the
< ** system mbuf allocation. Tuning nmbclusters
< ** can alleviate this.
< */
---
> * With many RX rings it is easy to exceed the
> * system mbuf allocation. Tuning nmbclusters
> * can alleviate this.
> */
507,508c819
< device_printf(dev, "RX Descriptors exceed "
< "system mbuf max, using default instead!\n");
---
> device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
526,537c837,839
< /* Allocate multicast array memory. */
< adapter->mta = malloc(sizeof(*adapter->mta) *
< MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
< if (adapter->mta == NULL) {
< device_printf(dev, "Can not allocate multicast setup array\n");
< error = ENOMEM;
< goto err_late;
< }
<
< /* Initialize the shared code */
< hw->allow_unsupported_sfp = allow_unsupported_sfp;
< error = ixgbe_init_shared_code(hw);
---
> hw->phy.reset_if_overtemp = TRUE;
> error = ixgbe_reset_hw(hw);
> hw->phy.reset_if_overtemp = FALSE;
540,543c842,845
< ** No optics in this port, set up
< ** so the timer routine will probe
< ** for later insertion.
< */
---
> * No optics in this port, set up
> * so the timer routine will probe
> * for later insertion.
> */
545c847
< error = 0;
---
> error = IXGBE_SUCCESS;
551c853
< device_printf(dev, "Unable to initialize the shared code\n");
---
> device_printf(dev, "Hardware initialization failed\n");
557c859
< if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
---
> if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
563c865,876
< error = ixgbe_init_hw(hw);
---
> /* Setup OS specific network interface */
> if (ixgbe_setup_interface(dev, adapter) != 0)
> goto err_late;
>
> if (adapter->feat_en & IXGBE_FEATURE_MSIX)
> error = ixgbe_allocate_msix(adapter);
> else
> error = ixgbe_allocate_legacy(adapter);
> if (error)
> goto err_late;
>
> error = ixgbe_start_hw(hw);
566,570c879
< device_printf(dev, "This device is a pre-production adapter/"
< "LOM. Please be aware there may be issues associated "
< "with your hardware.\nIf you are experiencing problems "
< "please contact your Intel or hardware representative "
< "who provided you with this hardware.\n");
---
> device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
583,594d891
< /* hw.ix defaults init */
< ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
< ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
< adapter->enable_aim = ixgbe_enable_aim;
<
< if ((adapter->msix > 1) && (ixgbe_enable_msix))
< error = ixgbe_allocate_msix(adapter);
< else
< error = ixgbe_allocate_legacy(adapter);
< if (error)
< goto err_late;
<
601,604d897
< /* Setup OS specific network interface */
< if (ixgbe_setup_interface(dev, adapter) != 0)
< goto err_late;
<
608,614c901
< /* Register for VLAN events */
< adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
< ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
< adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
< ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
<
< /* Check PCIE slot type/speed/width */
---
> /* Check PCIE slot type/speed/width */
617,618c904,910
< /* Set an initial default flow control & dmac value */
< adapter->fc = ixgbe_fc_full;
---
> /*
> * Do time init and sysctl init here, but
> * only on the first port of a bypass adapter.
> */
> ixgbe_bypass_init(adapter);
>
> /* Set an initial dmac value */
620c912,913
< adapter->eee_enabled = 0;
---
> /* Set initial advertised speeds (if applicable) */
> adapter->advertise = ixgbe_get_advertise(adapter);
622,624c915,916
< #ifdef PCI_IOV
< if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
< nvlist_t *pf_schema, *vf_schema;
---
> if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
> ixgbe_define_iov_schemas(dev, &error);
626,646d917
< hw->mbx.ops.init_params(hw);
< pf_schema = pci_iov_schema_alloc_node();
< vf_schema = pci_iov_schema_alloc_node();
< pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
< pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
< IOV_SCHEMA_HASDEFAULT, TRUE);
< pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
< IOV_SCHEMA_HASDEFAULT, FALSE);
< pci_iov_schema_add_bool(vf_schema, "allow-promisc",
< IOV_SCHEMA_HASDEFAULT, FALSE);
< error = pci_iov_attach(dev, pf_schema, vf_schema);
< if (error != 0) {
< device_printf(dev,
< "Error %d setting up SR-IOV\n", error);
< }
< }
< #endif /* PCI_IOV */
<
< /* Check for certain supported features */
< ixgbe_check_wol_support(adapter);
<
651,654c922,924
< /* let hardware know driver is loaded */
< ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
< ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
< IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
---
> /* For Netmap */
> adapter->init_locked = ixgbe_init_locked;
> adapter->stop_locked = ixgbe_stop;
656,658c926,928
< #ifdef DEV_NETMAP
< ixgbe_netmap_attach(adapter);
< #endif /* DEV_NETMAP */
---
> if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
> ixgbe_netmap_attach(adapter);
>
659a930
>
664a936
> free(adapter->queues, M_DEVBUF);
667a940,942
> ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
> ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
669c944,946
< free(adapter->mta, M_DEVBUF);
---
> free(adapter->mta, M_IXGBE);
> IXGBE_CORE_LOCK_DESTROY(adapter);
>
671c948
< }
---
> } /* ixgbe_attach */
673,674c950,951
< /*********************************************************************
< * Device removal routine
---
> /************************************************************************
> * ixgbe_check_wol_support
676,678c953,954
< * The detach entry point is called when the driver is being removed.
< * This routine stops the adapter and deallocates all the resources
< * that were allocated for driver operation.
---
> * Checks whether the adapter's ports are capable of
> * Wake On LAN by reading the adapter's NVM.
680,684c956,960
< * return 0 on success, positive on failure
< *********************************************************************/
<
< static int
< ixgbe_detach(device_t dev)
---
> * Sets each port's hw->wol_enabled value depending
> * on the value read here.
> ************************************************************************/
> static void
> ixgbe_check_wol_support(struct adapter *adapter)
686,689c962,963
< struct adapter *adapter = device_get_softc(dev);
< struct ix_queue *que = adapter->queues;
< struct tx_ring *txr = adapter->tx_rings;
< u32 ctrl_ext;
---
> struct ixgbe_hw *hw = &adapter->hw;
> u16 dev_caps = 0;
691c965,971
< INIT_DEBUGOUT("ixgbe_detach: begin");
---
> /* Find out WoL support for port */
> adapter->wol_support = hw->wol_enabled = 0;
> ixgbe_get_device_caps(hw, &dev_caps);
> if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
> ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
> hw->bus.func == 0))
> adapter->wol_support = hw->wol_enabled = 1;
693,697c973,974
< /* Make sure VLANS are not using driver */
< if (adapter->ifp->if_vlantrunk != NULL) {
< device_printf(dev,"Vlan in use, detach first\n");
< return (EBUSY);
< }
---
> /* Save initial wake up filter configuration */
> adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
699,704c976,977
< #ifdef PCI_IOV
< if (pci_iov_detach(dev) != 0) {
< device_printf(dev, "SR-IOV in use; detach first.\n");
< return (EBUSY);
< }
< #endif /* PCI_IOV */
---
> return;
> } /* ixgbe_check_wol_support */
706,710c979,987
< ether_ifdetach(adapter->ifp);
< /* Stop the adapter */
< IXGBE_CORE_LOCK(adapter);
< ixgbe_setup_low_power_mode(adapter);
< IXGBE_CORE_UNLOCK(adapter);
---
> /************************************************************************
> * ixgbe_setup_interface
> *
> * Setup networking device structure and register an interface.
> ************************************************************************/
> static int
> ixgbe_setup_interface(device_t dev, struct adapter *adapter)
> {
> struct ifnet *ifp;
712,720c989
< for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
< if (que->tq) {
< #ifndef IXGBE_LEGACY_TX
< taskqueue_drain(que->tq, &txr->txq_task);
< #endif
< taskqueue_drain(que->tq, &que->que_task);
< taskqueue_free(que->tq);
< }
< }
---
> INIT_DEBUGOUT("ixgbe_setup_interface: begin");
722,728c991,1003
< /* Drain the Link queue */
< if (adapter->tq) {
< taskqueue_drain(adapter->tq, &adapter->link_task);
< taskqueue_drain(adapter->tq, &adapter->mod_task);
< taskqueue_drain(adapter->tq, &adapter->msf_task);
< #ifdef PCI_IOV
< taskqueue_drain(adapter->tq, &adapter->mbx_task);
---
> ifp = adapter->ifp = if_alloc(IFT_ETHER);
> if (ifp == NULL) {
> device_printf(dev, "can not allocate ifnet structure\n");
> return (-1);
> }
> if_initname(ifp, device_get_name(dev), device_get_unit(dev));
> ifp->if_baudrate = IF_Gbps(10);
> ifp->if_init = ixgbe_init;
> ifp->if_softc = adapter;
> ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
> ifp->if_ioctl = ixgbe_ioctl;
> #if __FreeBSD_version >= 1100036
> if_setgetcounterfn(ifp, ixgbe_get_counter);
730,732c1005,1009
< taskqueue_drain(adapter->tq, &adapter->phy_task);
< #ifdef IXGBE_FDIR
< taskqueue_drain(adapter->tq, &adapter->fdir_task);
---
> #if __FreeBSD_version >= 1100045
> /* TSO parameters */
> ifp->if_hw_tsomax = 65518;
> ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
> ifp->if_hw_tsomaxsegsize = 2048;
734c1011,1022
< taskqueue_free(adapter->tq);
---
> if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
> ifp->if_start = ixgbe_legacy_start;
> IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
> ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
> IFQ_SET_READY(&ifp->if_snd);
> ixgbe_start_locked = ixgbe_legacy_start_locked;
> ixgbe_ring_empty = ixgbe_legacy_ring_empty;
> } else {
> ifp->if_transmit = ixgbe_mq_start;
> ifp->if_qflush = ixgbe_qflush;
> ixgbe_start_locked = ixgbe_mq_start_locked;
> ixgbe_ring_empty = drbr_empty;
737,740c1025
< /* let hardware know driver is unloading */
< ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
< ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
---
> ether_ifattach(ifp, adapter->hw.mac.addr);
742,746c1027
< /* Unregister VLAN events */
< if (adapter->vlan_attach != NULL)
< EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
< if (adapter->vlan_detach != NULL)
< EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
---
> adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
748,754c1029,1032
< callout_drain(&adapter->timer);
< #ifdef DEV_NETMAP
< netmap_detach(adapter->ifp);
< #endif /* DEV_NETMAP */
< ixgbe_free_pci_resources(adapter);
< bus_generic_detach(dev);
< if_free(adapter->ifp);
---
> /*
> * Tell the upper layer(s) we support long frames.
> */
> ifp->if_hdrlen = sizeof(struct ether_vlan_header);
756,758c1034,1044
< ixgbe_free_transmit_structures(adapter);
< ixgbe_free_receive_structures(adapter);
< free(adapter->mta, M_DEVBUF);
---
> /* Set capability flags */
> ifp->if_capabilities |= IFCAP_HWCSUM
> | IFCAP_HWCSUM_IPV6
> | IFCAP_TSO
> | IFCAP_LRO
> | IFCAP_VLAN_HWTAGGING
> | IFCAP_VLAN_HWTSO
> | IFCAP_VLAN_HWCSUM
> | IFCAP_JUMBO_MTU
> | IFCAP_VLAN_MTU
> | IFCAP_HWSTATS;
760,762c1046,1047
< IXGBE_CORE_LOCK_DESTROY(adapter);
< return (0);
< }
---
> /* Enable the above capabilities by default */
> ifp->if_capenable = ifp->if_capabilities;
764,768c1049,1057
< /*********************************************************************
< *
< * Shutdown entry point
< *
< **********************************************************************/
---
> /*
> * Don't turn this on by default, if vlans are
> * created on another pseudo device (eg. lagg)
> * then vlan events are not passed thru, breaking
> * operation, but with HW FILTER off it works. If
> * using vlans directly on the ixgbe driver you can
> * enable this and get full hardware tag filtering.
> */
> ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
770,774c1059,1064
< static int
< ixgbe_shutdown(device_t dev)
< {
< struct adapter *adapter = device_get_softc(dev);
< int error = 0;
---
> /*
> * Specify the media types supported by this adapter and register
> * callbacks to update media and link information
> */
> ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
> ixgbe_media_status);
776c1066,1067
< INIT_DEBUGOUT("ixgbe_shutdown: begin");
---
> adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
> ixgbe_add_media_types(adapter);
778,780c1069,1070
< IXGBE_CORE_LOCK(adapter);
< error = ixgbe_setup_low_power_mode(adapter);
< IXGBE_CORE_UNLOCK(adapter);
---
> /* Set autoselect media by default */
> ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
782,783c1072,1073
< return (error);
< }
---
> return (0);
> } /* ixgbe_setup_interface */
785,791c1075,1080
< /**
< * Methods for going from:
< * D0 -> D3: ixgbe_suspend
< * D3 -> D0: ixgbe_resume
< */
< static int
< ixgbe_suspend(device_t dev)
---
> #if __FreeBSD_version >= 1100036
> /************************************************************************
> * ixgbe_get_counter
> ************************************************************************/
> static uint64_t
> ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
793,794c1082,1084
< struct adapter *adapter = device_get_softc(dev);
< int error = 0;
---
> struct adapter *adapter;
> struct tx_ring *txr;
> uint64_t rv;
796c1086
< INIT_DEBUGOUT("ixgbe_suspend: begin");
---
> adapter = if_getsoftc(ifp);
798c1088,1117
< IXGBE_CORE_LOCK(adapter);
---
> switch (cnt) {
> case IFCOUNTER_IPACKETS:
> return (adapter->ipackets);
> case IFCOUNTER_OPACKETS:
> return (adapter->opackets);
> case IFCOUNTER_IBYTES:
> return (adapter->ibytes);
> case IFCOUNTER_OBYTES:
> return (adapter->obytes);
> case IFCOUNTER_IMCASTS:
> return (adapter->imcasts);
> case IFCOUNTER_OMCASTS:
> return (adapter->omcasts);
> case IFCOUNTER_COLLISIONS:
> return (0);
> case IFCOUNTER_IQDROPS:
> return (adapter->iqdrops);
> case IFCOUNTER_OQDROPS:
> rv = 0;
> txr = adapter->tx_rings;
> for (int i = 0; i < adapter->num_queues; i++, txr++)
> rv += txr->br->br_drops;
> return (rv);
> case IFCOUNTER_IERRORS:
> return (adapter->ierrors);
> default:
> return (if_get_counter_default(ifp, cnt));
> }
> } /* ixgbe_get_counter */
> #endif
800,808c1119,1123
< error = ixgbe_setup_low_power_mode(adapter);
<
< IXGBE_CORE_UNLOCK(adapter);
<
< return (error);
< }
<
< static int
< ixgbe_resume(device_t dev)
---
> /************************************************************************
> * ixgbe_add_media_types
> ************************************************************************/
> static void
> ixgbe_add_media_types(struct adapter *adapter)
810,811d1124
< struct adapter *adapter = device_get_softc(dev);
< struct ifnet *ifp = adapter->ifp;
813c1126,1127
< u32 wus;
---
> device_t dev = adapter->dev;
> u64 layer;
815c1129
< INIT_DEBUGOUT("ixgbe_resume: begin");
---
> layer = adapter->phy_layer;
817c1131,1139
< IXGBE_CORE_LOCK(adapter);
---
> /* Media types with matching FreeBSD media defines */
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
> if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
> if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
> if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
819,826c1141,1144
< /* Read & clear WUS register */
< wus = IXGBE_READ_REG(hw, IXGBE_WUS);
< if (wus)
< device_printf(dev, "Woken up by (WUS): %#010x\n",
< IXGBE_READ_REG(hw, IXGBE_WUS));
< IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
< /* And clear WUFC until next low-power transition */
< IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
---
> if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
> layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
> NULL);
828,833c1146,1160
< /*
< * Required after D3->D0 transition;
< * will re-advertise all previous advertised speeds
< */
< if (ifp->if_flags & IFF_UP)
< ixgbe_init_locked(adapter);
---
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
> if (hw->phy.multispeed_fiber)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
> NULL);
> }
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
> if (hw->phy.multispeed_fiber)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
> NULL);
> } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
835c1162,1194
< IXGBE_CORE_UNLOCK(adapter);
---
> #ifdef IFM_ETH_XTYPE
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
> if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
> if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
> #else
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
> device_printf(dev, "Media supported: 10GbaseKR\n");
> device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
> }
> if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
> device_printf(dev, "Media supported: 10GbaseKX4\n");
> device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
> }
> if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
> device_printf(dev, "Media supported: 1000baseKX\n");
> device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
> }
> if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
> device_printf(dev, "Media supported: 2500baseKX\n");
> device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
> }
> #endif
> if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
> device_printf(dev, "Media supported: 1000baseBX\n");
837,838c1196,1200
< return (0);
< }
---
> if (hw->device_id == IXGBE_DEV_ID_82598AT) {
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
> 0, NULL);
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
> }
839a1202,1203
> ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
> } /* ixgbe_add_media_types */
841,848c1205,1232
< /*********************************************************************
< * Ioctl entry point
< *
< * ixgbe_ioctl is called when the user wants to configure the
< * interface.
< *
< * return 0 on success, positive on failure
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_is_sfp
> ************************************************************************/
> static inline bool
> ixgbe_is_sfp(struct ixgbe_hw *hw)
> {
> switch (hw->mac.type) {
> case ixgbe_mac_82598EB:
> if (hw->phy.type == ixgbe_phy_nl)
> return TRUE;
> return FALSE;
> case ixgbe_mac_82599EB:
> switch (hw->mac.ops.get_media_type(hw)) {
> case ixgbe_media_type_fiber:
> case ixgbe_media_type_fiber_qsfp:
> return TRUE;
> default:
> return FALSE;
> }
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
> return TRUE;
> return FALSE;
> default:
> return FALSE;
> }
> } /* ixgbe_is_sfp */
850,851c1234,1238
< static int
< ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
---
> /************************************************************************
> * ixgbe_config_link
> ************************************************************************/
> static void
> ixgbe_config_link(struct adapter *adapter)
853,859c1240,1242
< struct adapter *adapter = ifp->if_softc;
< struct ifreq *ifr = (struct ifreq *) data;
< #if defined(INET) || defined(INET6)
< struct ifaddr *ifa = (struct ifaddr *)data;
< #endif
< int error = 0;
< bool avoid_reset = FALSE;
---
> struct ixgbe_hw *hw = &adapter->hw;
> u32 autoneg, err = 0;
> bool sfp, negotiate;
861c1244
< switch (command) {
---
> sfp = ixgbe_is_sfp(hw);
863,883c1246,1250
< case SIOCSIFADDR:
< #ifdef INET
< if (ifa->ifa_addr->sa_family == AF_INET)
< avoid_reset = TRUE;
< #endif
< #ifdef INET6
< if (ifa->ifa_addr->sa_family == AF_INET6)
< avoid_reset = TRUE;
< #endif
< /*
< ** Calling init results in link renegotiation,
< ** so we avoid doing it when possible.
< */
< if (avoid_reset) {
< ifp->if_flags |= IFF_UP;
< if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
< ixgbe_init(adapter);
< #ifdef INET
< if (!(ifp->if_flags & IFF_NOARP))
< arp_ifinit(ifp, ifa);
< #endif
---
> if (sfp) {
> if (hw->phy.multispeed_fiber) {
> hw->mac.ops.setup_sfp(hw);
> ixgbe_enable_tx_laser(hw);
> taskqueue_enqueue(adapter->tq, &adapter->msf_task);
885,939c1252,1269
< error = ether_ioctl(ifp, command, data);
< break;
< case SIOCSIFMTU:
< IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
< if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
< error = EINVAL;
< } else {
< IXGBE_CORE_LOCK(adapter);
< ifp->if_mtu = ifr->ifr_mtu;
< adapter->max_frame_size =
< ifp->if_mtu + IXGBE_MTU_HDR;
< if (ifp->if_drv_flags & IFF_DRV_RUNNING)
< ixgbe_init_locked(adapter);
< #ifdef PCI_IOV
< ixgbe_recalculate_max_frame(adapter);
< #endif
< IXGBE_CORE_UNLOCK(adapter);
< }
< break;
< case SIOCSIFFLAGS:
< IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
< IXGBE_CORE_LOCK(adapter);
< if (ifp->if_flags & IFF_UP) {
< if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
< if ((ifp->if_flags ^ adapter->if_flags) &
< (IFF_PROMISC | IFF_ALLMULTI)) {
< ixgbe_set_promisc(adapter);
< }
< } else
< ixgbe_init_locked(adapter);
< } else
< if (ifp->if_drv_flags & IFF_DRV_RUNNING)
< ixgbe_stop(adapter);
< adapter->if_flags = ifp->if_flags;
< IXGBE_CORE_UNLOCK(adapter);
< break;
< case SIOCADDMULTI:
< case SIOCDELMULTI:
< IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
< if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
< IXGBE_CORE_LOCK(adapter);
< ixgbe_disable_intr(adapter);
< ixgbe_set_multi(adapter);
< ixgbe_enable_intr(adapter);
< IXGBE_CORE_UNLOCK(adapter);
< }
< break;
< case SIOCSIFMEDIA:
< case SIOCGIFMEDIA:
< IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
< error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
< break;
< case SIOCSIFCAP:
< {
< IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
---
> taskqueue_enqueue(adapter->tq, &adapter->mod_task);
> } else {
> if (hw->mac.ops.check_link)
> err = ixgbe_check_link(hw, &adapter->link_speed,
> &adapter->link_up, FALSE);
> if (err)
> goto out;
> autoneg = hw->phy.autoneg_advertised;
> if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
> err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
> &negotiate);
> if (err)
> goto out;
> if (hw->mac.ops.setup_link)
> err = hw->mac.ops.setup_link(hw, autoneg,
> adapter->link_up);
> }
> out:
941,943c1271,1272
< int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
< if (!mask)
< break;
---
> return;
> } /* ixgbe_config_link */
945,965c1274,1283
< /* HW cannot turn these on/off separately */
< if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
< ifp->if_capenable ^= IFCAP_RXCSUM;
< ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
< }
< if (mask & IFCAP_TXCSUM)
< ifp->if_capenable ^= IFCAP_TXCSUM;
< if (mask & IFCAP_TXCSUM_IPV6)
< ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
< if (mask & IFCAP_TSO4)
< ifp->if_capenable ^= IFCAP_TSO4;
< if (mask & IFCAP_TSO6)
< ifp->if_capenable ^= IFCAP_TSO6;
< if (mask & IFCAP_LRO)
< ifp->if_capenable ^= IFCAP_LRO;
< if (mask & IFCAP_VLAN_HWTAGGING)
< ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
< if (mask & IFCAP_VLAN_HWFILTER)
< ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
< if (mask & IFCAP_VLAN_HWTSO)
< ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
---
> /************************************************************************
> * ixgbe_update_stats_counters - Update board statistics counters.
> ************************************************************************/
> static void
> ixgbe_update_stats_counters(struct adapter *adapter)
> {
> struct ixgbe_hw *hw = &adapter->hw;
> struct ixgbe_hw_stats *stats = &adapter->stats.pf;
> u32 missed_rx = 0, bprc, lxon, lxoff, total;
> u64 total_missed_rx = 0;
967,973c1285,1294
< if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
< IXGBE_CORE_LOCK(adapter);
< ixgbe_init_locked(adapter);
< IXGBE_CORE_UNLOCK(adapter);
< }
< VLAN_CAPABILITIES(ifp);
< break;
---
> stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
> stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
> stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
> stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
> stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
>
> for (int i = 0; i < 16; i++) {
> stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
> stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
> stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
975,992c1296,1298
< #if __FreeBSD_version >= 1100036
< case SIOCGI2C:
< {
< struct ixgbe_hw *hw = &adapter->hw;
< struct ifi2creq i2c;
< int i;
< IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
< error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
< if (error != 0)
< break;
< if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
< error = EINVAL;
< break;
< }
< if (i2c.len > sizeof(i2c.data)) {
< error = EINVAL;
< break;
< }
---
> stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
> stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
> stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
994,998c1300,1319
< for (i = 0; i < i2c.len; i++)
< hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
< i2c.dev_addr, &i2c.data[i]);
< error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
< break;
---
> /* Hardware workaround, gprc counts missed packets */
> stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
> stats->gprc -= missed_rx;
>
> if (hw->mac.type != ixgbe_mac_82598EB) {
> stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
> ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
> stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
> ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
> stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
> ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
> stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
> stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
> } else {
> stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
> stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
> /* 82598 only has a counter in the high register */
> stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
> stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
> stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1000,1005d1320
< #endif
< default:
< IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
< error = ether_ioctl(ifp, command, data);
< break;
< }
1007,1008c1322,1330
< return (error);
< }
---
> /*
> * Workaround: mprc hardware is incorrectly counting
> * broadcasts, so for now we subtract those.
> */
> bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
> stats->bprc += bprc;
> stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
> if (hw->mac.type == ixgbe_mac_82598EB)
> stats->mprc -= bprc;
1010,1021c1332,1337
< /*
< * Set the various hardware offload abilities.
< *
< * This takes the ifnet's if_capenable flags (e.g. set by the user using
< * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
< * mbuf offload flags the driver will understand.
< */
< static void
< ixgbe_set_if_hwassist(struct adapter *adapter)
< {
< struct ifnet *ifp = adapter->ifp;
< struct ixgbe_hw *hw = &adapter->hw;
---
> stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
> stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
> stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
> stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
> stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
> stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1023,1032c1339,1377
< ifp->if_hwassist = 0;
< #if __FreeBSD_version >= 1000000
< if (ifp->if_capenable & IFCAP_TSO4)
< ifp->if_hwassist |= CSUM_IP_TSO;
< if (ifp->if_capenable & IFCAP_TSO6)
< ifp->if_hwassist |= CSUM_IP6_TSO;
< if (ifp->if_capenable & IFCAP_TXCSUM) {
< ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
< if (hw->mac.type != ixgbe_mac_82598EB)
< ifp->if_hwassist |= CSUM_IP_SCTP;
---
> lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
> stats->lxontxc += lxon;
> lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
> stats->lxofftxc += lxoff;
> total = lxon + lxoff;
>
> stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
> stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
> stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
> stats->gptc -= total;
> stats->mptc -= total;
> stats->ptc64 -= total;
> stats->gotc -= total * ETHER_MIN_LEN;
>
> stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
> stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
> stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
> stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
> stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
> stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
> stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
> stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
> stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
> stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
> stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
> stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
> stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
> stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
> stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
> stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
> stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
> stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
> /* Only read FCOE on 82599 */
> if (hw->mac.type != ixgbe_mac_82598EB) {
> stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
> stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
> stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
> stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
> stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1034,1048d1378
< if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
< ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
< if (hw->mac.type != ixgbe_mac_82598EB)
< ifp->if_hwassist |= CSUM_IP6_SCTP;
< }
< #else
< if (ifp->if_capenable & IFCAP_TSO)
< ifp->if_hwassist |= CSUM_TSO;
< if (ifp->if_capenable & IFCAP_TXCSUM) {
< ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
< if (hw->mac.type != ixgbe_mac_82598EB)
< ifp->if_hwassist |= CSUM_SCTP;
< }
< #endif
< }
1050,1060c1380,1390
< /*********************************************************************
< * Init entry point
< *
< * This routine is used in two ways. It is used by the stack as
< * init entry point in network interface structure. It is also used
< * by the driver as a hw/sw initialization routine to get to a
< * consistent state.
< *
< * return 0 on success, positive on failure
< **********************************************************************/
< #define IXGBE_MHADD_MFS_SHIFT 16
---
> /* Fill out the OS statistics structure */
> IXGBE_SET_IPACKETS(adapter, stats->gprc);
> IXGBE_SET_OPACKETS(adapter, stats->gptc);
> IXGBE_SET_IBYTES(adapter, stats->gorc);
> IXGBE_SET_OBYTES(adapter, stats->gotc);
> IXGBE_SET_IMCASTS(adapter, stats->mprc);
> IXGBE_SET_OMCASTS(adapter, stats->mptc);
> IXGBE_SET_COLLISIONS(adapter, 0);
> IXGBE_SET_IQDROPS(adapter, total_missed_rx);
> IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
> } /* ixgbe_update_stats_counters */
1061a1392,1396
> /************************************************************************
> * ixgbe_add_hw_stats
> *
> * Add sysctl variables, one per statistic, to the system.
> ************************************************************************/
1063c1398
< ixgbe_init_locked(struct adapter *adapter)
---
> ixgbe_add_hw_stats(struct adapter *adapter)
1065,1075c1400,1408
< struct ifnet *ifp = adapter->ifp;
< device_t dev = adapter->dev;
< struct ixgbe_hw *hw = &adapter->hw;
< struct tx_ring *txr;
< struct rx_ring *rxr;
< u32 txdctl, mhadd;
< u32 rxdctl, rxctrl;
< int err = 0;
< #ifdef PCI_IOV
< enum ixgbe_iov_mode mode;
< #endif
---
> device_t dev = adapter->dev;
> struct tx_ring *txr = adapter->tx_rings;
> struct rx_ring *rxr = adapter->rx_rings;
> struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
> struct sysctl_oid *tree = device_get_sysctl_tree(dev);
> struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
> struct ixgbe_hw_stats *stats = &adapter->stats.pf;
> struct sysctl_oid *stat_node, *queue_node;
> struct sysctl_oid_list *stat_list, *queue_list;
1077,1078c1410,1411
< mtx_assert(&adapter->core_mtx, MA_OWNED);
< INIT_DEBUGOUT("ixgbe_init_locked: begin");
---
> #define QUEUE_NAME_LEN 32
> char namebuf[QUEUE_NAME_LEN];
1080,1082c1413,1421
< hw->adapter_stopped = FALSE;
< ixgbe_stop_adapter(hw);
< callout_stop(&adapter->timer);
---
> /* Driver Statistics */
> SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
> CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
> SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
> CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
> SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
> CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
> SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
> CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1084,1090c1423,1456
< #ifdef PCI_IOV
< mode = ixgbe_get_iov_mode(adapter);
< adapter->pool = ixgbe_max_vfs(mode);
< /* Queue indices may change with IOV mode */
< for (int i = 0; i < adapter->num_queues; i++) {
< adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
< adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
---
> for (int i = 0; i < adapter->num_queues; i++, txr++) {
> snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
> queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
> CTLFLAG_RD, NULL, "Queue Name");
> queue_list = SYSCTL_CHILDREN(queue_node);
>
> SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
> CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
> sizeof(&adapter->queues[i]),
> ixgbe_sysctl_interrupt_rate_handler, "IU",
> "Interrupt Rate");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
> CTLFLAG_RD, &(adapter->queues[i].irqs),
> "irqs on this queue");
> SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
> CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
> ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
> SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
> CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
> ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
> CTLFLAG_RD, &txr->tso_tx, "TSO");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
> CTLFLAG_RD, &txr->no_tx_dma_setup,
> "Driver tx dma failure in xmit");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
> CTLFLAG_RD, &txr->no_desc_avail,
> "Queue No Descriptor Available");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
> CTLFLAG_RD, &txr->total_packets,
> "Queue Packets Transmitted");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
> CTLFLAG_RD, &txr->br->br_drops,
> "Packets dropped in buf_ring");
1092,1094d1457
< #endif
< /* reprogram the RAR[0] in case user changed it. */
< ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
1096,1099c1459,1460
< /* Get the latest mac address, User can use a LAA */
< bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
< ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
< hw->addr_ctrl.rar_used_count = 1;
---
> for (int i = 0; i < adapter->num_queues; i++, rxr++) {
> struct lro_ctrl *lro = &rxr->lro;
1101,1102c1462,1465
< /* Set hardware offload abilities from ifnet flags */
< ixgbe_set_if_hwassist(adapter);
---
> snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
> queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
> CTLFLAG_RD, NULL, "Queue Name");
> queue_list = SYSCTL_CHILDREN(queue_node);
1104,1108c1467,1484
< /* Prepare transmit descriptors and buffers */
< if (ixgbe_setup_transmit_structures(adapter)) {
< device_printf(dev, "Could not setup transmit structures\n");
< ixgbe_stop(adapter);
< return;
---
> SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
> CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
> ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
> SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
> CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
> ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
> CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
> CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
> CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
> SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
> CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
> SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
> CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
> SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
> CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1111,1115c1487
< ixgbe_init_hw(hw);
< #ifdef PCI_IOV
< ixgbe_initialize_iov(adapter);
< #endif
< ixgbe_initialize_transmit_units(adapter);
---
> /* MAC stats get their own sub node */
1117,1118c1489,1491
< /* Setup Multicast table */
< ixgbe_set_multi(adapter);
---
> stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
> CTLFLAG_RD, NULL, "MAC Statistics");
> stat_list = SYSCTL_CHILDREN(stat_node);
1120,1124c1493,1508
< /* Determine the correct mbuf pool, based on frame size */
< if (adapter->max_frame_size <= MCLBYTES)
< adapter->rx_mbuf_sz = MCLBYTES;
< else
< adapter->rx_mbuf_sz = MJUMPAGESIZE;
---
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
> CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
> CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
> CTLFLAG_RD, &stats->errbc, "Byte Errors");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
> CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
> CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
> CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
> CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
> CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1126,1131c1510,1518
< /* Prepare receive descriptors and buffers */
< if (ixgbe_setup_receive_structures(adapter)) {
< device_printf(dev, "Could not setup receive structures\n");
< ixgbe_stop(adapter);
< return;
< }
---
> /* Flow Control stats */
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
> CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
> CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
> CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
> CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1133,1134c1520,1558
< /* Configure RX settings */
< ixgbe_initialize_receive_units(adapter);
---
> /* Packet Reception Stats */
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
> CTLFLAG_RD, &stats->tor, "Total Octets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
> CTLFLAG_RD, &stats->gorc, "Good Octets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
> CTLFLAG_RD, &stats->tpr, "Total Packets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
> CTLFLAG_RD, &stats->gprc, "Good Packets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
> CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
> CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
> CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
> CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
> CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
> CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
> CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
> CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
> CTLFLAG_RD, &stats->ruc, "Receive Undersized");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
> CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
> CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
> CTLFLAG_RD, &stats->rjc, "Received Jabber");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
> CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
> CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
> CTLFLAG_RD, &stats->xec, "Checksum Errors");
1136,1137c1560,1585
< /* Enable SDP & MSIX interrupts based on adapter */
< ixgbe_config_gpie(adapter);
---
> /* Packet Transmission Stats */
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
> CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
> CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
> CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
> CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
> CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
> CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
> CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
> CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
> CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
> CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
> CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
> SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
> CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
> } /* ixgbe_add_hw_stats */
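The counters registered above surface under the device's sysctl tree; a minimal userland sketch (not part of this diff) that reads one of them with sysctlbyname(3) follows. The node name dev.ix.0.mac_stats.crc_errs is an assumption based on unit 0 of the ix driver; adjust for your system.

/* Userland sketch only: query a MAC counter exported by ixgbe_add_hw_stats(). */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t crcerrs;
	size_t len = sizeof(crcerrs);

	/* UQUAD sysctls are exported as 64-bit unsigned integers. */
	if (sysctlbyname("dev.ix.0.mac_stats.crc_errs", &crcerrs, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("CRC errors: %ju\n", (uintmax_t)crcerrs);
	return (0);
}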
1139,1164c1587,1597
< /* Set MTU size */
< if (ifp->if_mtu > ETHERMTU) {
< /* aka IXGBE_MAXFRS on 82599 and newer */
< mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
< mhadd &= ~IXGBE_MHADD_MFS_MASK;
< mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
< IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
< }
<
< /* Now enable all the queues */
< for (int i = 0; i < adapter->num_queues; i++) {
< txr = &adapter->tx_rings[i];
< txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
< txdctl |= IXGBE_TXDCTL_ENABLE;
< /* Set WTHRESH to 8, burst writeback */
< txdctl |= (8 << 16);
< /*
< * When the internal queue falls below PTHRESH (32),
< * start prefetching as long as there are at least
< * HTHRESH (1) buffers ready. The values are taken
< * from the Intel linux driver 3.8.21.
< * Prefetching enables tx line rate even with 1 queue.
< */
< txdctl |= (32 << 0) | (1 << 8);
< IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
< }
---
> /************************************************************************
> * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
> *
> * Retrieves the TDH value from the hardware
> ************************************************************************/
> static int
> ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
> {
> struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
> int error;
> unsigned int val;
1166,1208c1599,1600
< for (int i = 0, j = 0; i < adapter->num_queues; i++) {
< rxr = &adapter->rx_rings[i];
< rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
< if (hw->mac.type == ixgbe_mac_82598EB) {
< /*
< ** PTHRESH = 21
< ** HTHRESH = 4
< ** WTHRESH = 8
< */
< rxdctl &= ~0x3FFFFF;
< rxdctl |= 0x080420;
< }
< rxdctl |= IXGBE_RXDCTL_ENABLE;
< IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
< for (; j < 10; j++) {
< if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
< IXGBE_RXDCTL_ENABLE)
< break;
< else
< msec_delay(1);
< }
< wmb();
< #ifdef DEV_NETMAP
< /*
< * In netmap mode, we must preserve the buffers made
< * available to userspace before the if_init()
< * (this is true by default on the TX side, because
< * init makes all buffers available to userspace).
< *
< * netmap_reset() and the device specific routines
< * (e.g. ixgbe_setup_receive_rings()) map these
< * buffers at the end of the NIC ring, so here we
< * must set the RDT (tail) register to make sure
< * they are not overwritten.
< *
< * In this driver the NIC ring starts at RDH = 0,
< * RDT points to the last slot available for reception (?),
< * so RDT = num_rx_desc - 1 means the whole ring is available.
< */
< if (ifp->if_capenable & IFCAP_NETMAP) {
< struct netmap_adapter *na = NA(adapter->ifp);
< struct netmap_kring *kring = &na->rx_rings[i];
< int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
---
> if (!txr)
> return (0);
1210,1214c1602,1605
< IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
< } else
< #endif /* DEV_NETMAP */
< IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
< }
---
> val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
> error = sysctl_handle_int(oidp, &val, 0, req);
> if (error || !req->newptr)
> return error;
1216,1221c1607,1608
< /* Enable Receive engine */
< rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
< if (hw->mac.type == ixgbe_mac_82598EB)
< rxctrl |= IXGBE_RXCTRL_DMBYPS;
< rxctrl |= IXGBE_RXCTRL_RXEN;
< ixgbe_enable_rx_dma(hw, rxctrl);
---
> return (0);
> } /* ixgbe_sysctl_tdh_handler */
1223c1610,1620
< callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
---
> /************************************************************************
> * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
> *
> * Retrieves the TDT value from the hardware
> ************************************************************************/
> static int
> ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
> {
> struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
> int error;
> unsigned int val;
1225,1239c1622,1623
< /* Set up MSI/X routing */
< if (ixgbe_enable_msix) {
< ixgbe_configure_ivars(adapter);
< /* Set up auto-mask */
< if (hw->mac.type == ixgbe_mac_82598EB)
< IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
< else {
< IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
< IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
< }
< } else { /* Simple settings for Legacy/MSI */
< ixgbe_set_ivar(adapter, 0, 0, 0);
< ixgbe_set_ivar(adapter, 0, 0, 1);
< IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
< }
---
> if (!txr)
> return (0);
1241,1244c1625,1628
< #ifdef IXGBE_FDIR
< /* Init Flow director */
< if (hw->mac.type != ixgbe_mac_82598EB) {
< u32 hdrm = 32 << fdir_pballoc;
---
> val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
> error = sysctl_handle_int(oidp, &val, 0, req);
> if (error || !req->newptr)
> return error;
1246,1249c1630,1631
< hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
< ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
< }
< #endif
---
> return (0);
> } /* ixgbe_sysctl_tdt_handler */
1251,1262c1633,1643
< /*
< * Check on any SFP devices that
< * need to be kick-started
< */
< if (hw->phy.type == ixgbe_phy_none) {
< err = hw->phy.ops.identify(hw);
< if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
< device_printf(dev,
< "Unsupported SFP+ module type was detected.\n");
< return;
< }
< }
---
> /************************************************************************
> * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
> *
> * Retrieves the RDH value from the hardware
> ************************************************************************/
> static int
> ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
> {
> struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
> int error;
> unsigned int val;
1264,1265c1645,1646
< /* Set moderation on the Link interrupt */
< IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
---
> if (!rxr)
> return (0);
1267,1272c1648,1651
< /* Configure Energy Efficient Ethernet for supported devices */
< if (hw->mac.ops.setup_eee) {
< err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
< if (err)
< device_printf(dev, "Error setting up EEE: %d\n", err);
< }
---
> val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
> error = sysctl_handle_int(oidp, &val, 0, req);
> if (error || !req->newptr)
> return error;
1274,1275c1653,1654
< /* Enable power to the phy. */
< ixgbe_set_phy_power(hw, TRUE);
---
> return (0);
> } /* ixgbe_sysctl_rdh_handler */
1277,1278c1656,1666
< /* Config/Enable Link */
< ixgbe_config_link(adapter);
---
> /************************************************************************
> * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
> *
> * Retrieves the RDT value from the hardware
> ************************************************************************/
> static int
> ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
> {
> struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
> int error;
> unsigned int val;
1280,1281c1668,1669
< /* Hardware Packet Buffer & Flow Control setup */
< ixgbe_config_delay_values(adapter);
---
> if (!rxr)
> return (0);
1283,1284c1671,1674
< /* Initialize the FC settings */
< ixgbe_start_hw(hw);
---
> val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
> error = sysctl_handle_int(oidp, &val, 0, req);
> if (error || !req->newptr)
> return error;
1286,1287c1676,1677
< /* Set up VLAN support and filter */
< ixgbe_setup_vlan_hw_support(adapter);
---
> return (0);
> } /* ixgbe_sysctl_rdt_handler */
1289,1290c1679,1691
< /* Setup DMA Coalescing */
< ixgbe_config_dmac(adapter);
---
> /************************************************************************
> * ixgbe_register_vlan
> *
> * Run via the vlan config EVENT; it enables us to use the
> * HW filter table since we can get the VLAN ID. This
> * just creates the entry in the soft version of the
> * VFTA; init will repopulate the real table.
> ************************************************************************/
> static void
> ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
> {
> struct adapter *adapter = ifp->if_softc;
> u16 index, bit;
1292,1293c1693,1694
< /* And now turn on interrupts */
< ixgbe_enable_intr(adapter);
---
> if (ifp->if_softc != arg) /* Not our event */
> return;
1295,1302c1696,1697
< #ifdef PCI_IOV
< /* Enable the use of the MBX by the VF's */
< {
< u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
< reg |= IXGBE_CTRL_EXT_PFRSTD;
< IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
< }
< #endif
---
> if ((vtag == 0) || (vtag > 4095)) /* Invalid */
> return;
1304,1305c1699,1706
< /* Now inform the stack we're ready */
< ifp->if_drv_flags |= IFF_DRV_RUNNING;
---
> IXGBE_CORE_LOCK(adapter);
> index = (vtag >> 5) & 0x7F;
> bit = vtag & 0x1F;
> adapter->shadow_vfta[index] |= (1 << bit);
> ++adapter->num_vlans;
> ixgbe_setup_vlan_hw_support(adapter);
> IXGBE_CORE_UNLOCK(adapter);
> } /* ixgbe_register_vlan */
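For reference, the shadow-VFTA bookkeeping above is plain word/bit arithmetic over a table of 32-bit words (4096 VLAN IDs / 32 bits = 128 words). A standalone sketch, illustrative only and not driver code:

/* Sketch: map a VLAN tag to its VFTA word and bit, as done above. */
#include <stdint.h>
#include <stdio.h>

#define VFTA_WORDS 128

int
main(void)
{
	uint32_t shadow_vfta[VFTA_WORDS] = { 0 };
	uint16_t vtag = 100;			/* example VLAN ID */
	uint16_t index = (vtag >> 5) & 0x7F;	/* which 32-bit word */
	uint16_t bit = vtag & 0x1F;		/* which bit in that word */

	shadow_vfta[index] |= (1u << bit);
	printf("VLAN %u -> word %u, bit %u\n",
	    (unsigned)vtag, (unsigned)index, (unsigned)bit);
	return (0);
}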
1307,1309c1708,1712
< return;
< }
<
---
> /************************************************************************
> * ixgbe_unregister_vlan
> *
> * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
> ************************************************************************/
1311c1714
< ixgbe_init(void *arg)
---
> ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1313c1716,1717
< struct adapter *adapter = arg;
---
> struct adapter *adapter = ifp->if_softc;
> u16 index, bit;
1314a1719,1724
> if (ifp->if_softc != arg)
> return;
>
> if ((vtag == 0) || (vtag > 4095)) /* Invalid */
> return;
>
1316c1726,1731
< ixgbe_init_locked(adapter);
---
> index = (vtag >> 5) & 0x7F;
> bit = vtag & 0x1F;
> adapter->shadow_vfta[index] &= ~(1 << bit);
> --adapter->num_vlans;
> /* Re-init to load the changes */
> ixgbe_setup_vlan_hw_support(adapter);
1318,1319c1733
< return;
< }
---
> } /* ixgbe_unregister_vlan */
1320a1735,1737
> /************************************************************************
> * ixgbe_setup_vlan_hw_support
> ************************************************************************/
1322c1739
< ixgbe_config_gpie(struct adapter *adapter)
---
> ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1323a1741
> struct ifnet *ifp = adapter->ifp;
1325c1743,1745
< u32 gpie;
---
> struct rx_ring *rxr;
> int i;
> u32 ctrl;
1327d1746
< gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1329,1332d1747
< /* Fan Failure Interrupt */
< if (hw->device_id == IXGBE_DEV_ID_82598AT)
< gpie |= IXGBE_SDP1_GPIEN;
<
1334,1335c1749,1752
< * Module detection (SDP2)
< * Media ready (SDP1)
---
> * We get here through init_locked, meaning
> * a soft reset; this has already cleared
> * the VFTA and other state, so if no
> * VLANs have been registered, do nothing.
1337,1340c1754,1766
< if (hw->mac.type == ixgbe_mac_82599EB) {
< gpie |= IXGBE_SDP2_GPIEN;
< if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
< gpie |= IXGBE_SDP1_GPIEN;
---
> if (adapter->num_vlans == 0)
> return;
>
> /* Setup the queues for vlans */
> for (i = 0; i < adapter->num_queues; i++) {
> rxr = &adapter->rx_rings[i];
> /* On 82599 the VLAN enable is per/queue in RXDCTL */
> if (hw->mac.type != ixgbe_mac_82598EB) {
> ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
> ctrl |= IXGBE_RXDCTL_VME;
> IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
> }
> rxr->vtag_strip = TRUE;
1342a1769,1770
> if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
> return;
1344,1345c1772,1773
< * Thermal Failure Detection (X540)
< * Link Detection (X552 SFP+, X552/X557-AT)
---
> * A soft reset zeroes out the VFTA, so
> * we need to repopulate it now.
1347,1350c1775,1778
< if (hw->mac.type == ixgbe_mac_X540 ||
< hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
< hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
< gpie |= IXGBE_SDP0_GPIEN_X540;
---
> for (i = 0; i < IXGBE_VFTA_SIZE; i++)
> if (adapter->shadow_vfta[i] != 0)
> IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
> adapter->shadow_vfta[i]);
1352,1356c1780,1784
< if (adapter->msix > 1) {
< /* Enable Enhanced MSIX mode */
< gpie |= IXGBE_GPIE_MSIX_MODE;
< gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
< IXGBE_GPIE_OCD;
---
> ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
> /* Enable the Filter Table if enabled */
> if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
> ctrl &= ~IXGBE_VLNCTRL_CFIEN;
> ctrl |= IXGBE_VLNCTRL_VFE;
1357a1786,1789
> if (hw->mac.type == ixgbe_mac_82598EB)
> ctrl |= IXGBE_VLNCTRL_VME;
> IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
> } /* ixgbe_setup_vlan_hw_support */
1359,1365c1791,1796
< IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
< return;
< }
<
< /*
< * Requires adapter->max_frame_size to be set.
< */
---
> /************************************************************************
> * ixgbe_get_slot_info
> *
> * Get the width and transaction speed of
> * the slot this adapter is plugged into.
> ************************************************************************/
1367c1798
< ixgbe_config_delay_values(struct adapter *adapter)
---
> ixgbe_get_slot_info(struct adapter *adapter)
1369,1370c1800,1804
< struct ixgbe_hw *hw = &adapter->hw;
< u32 rxpb, frame, size, tmp;
---
> device_t dev = adapter->dev;
> struct ixgbe_hw *hw = &adapter->hw;
> u32 offset;
> u16 link;
> int bus_info_valid = TRUE;
1372,1380c1806,1810
< frame = adapter->max_frame_size;
<
< /* Calculate High Water */
< switch (hw->mac.type) {
< case ixgbe_mac_X540:
< case ixgbe_mac_X550:
< case ixgbe_mac_X550EM_x:
< tmp = IXGBE_DV_X540(frame, frame);
< break;
---
> /* Some devices are behind an internal bridge */
> switch (hw->device_id) {
> case IXGBE_DEV_ID_82599_SFP_SF_QP:
> case IXGBE_DEV_ID_82599_QSFP_SF_QP:
> goto get_parent_info;
1382d1811
< tmp = IXGBE_DV(frame, frame);
1385,1387d1813
< size = IXGBE_BT2KB(tmp);
< rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
< hw->fc.high_water[0] = rxpb - size;
1389c1815,1820
< /* Now calculate Low Water */
---
> ixgbe_get_bus_info(hw);
>
> /*
> * Some devices don't use PCI-E, but there is no need
> * to display "Unknown" for bus speed and width.
> */
1391,1392d1821
< case ixgbe_mac_X540:
< case ixgbe_mac_X550:
1394,1395c1823,1824
< tmp = IXGBE_LOW_DV_X540(frame);
< break;
---
> case ixgbe_mac_X550EM_a:
> return;
1397,1398c1826
< tmp = IXGBE_LOW_DV(frame);
< break;
---
> goto display;
1400d1827
< hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1402,1405c1829,1857
< hw->fc.requested_mode = adapter->fc;
< hw->fc.pause_time = IXGBE_FC_PAUSE;
< hw->fc.send_xon = TRUE;
< }
---
> get_parent_info:
> /*
> * For the Quad port adapter we need to parse back
> * up the PCI tree to find the speed of the expansion
> * slot into which this adapter is plugged. A bit more work.
> */
> dev = device_get_parent(device_get_parent(dev));
> #ifdef IXGBE_DEBUG
> device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
> pci_get_slot(dev), pci_get_function(dev));
> #endif
> dev = device_get_parent(device_get_parent(dev));
> #ifdef IXGBE_DEBUG
> device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
> pci_get_slot(dev), pci_get_function(dev));
> #endif
> /* Now get the PCI Express Capabilities offset */
> if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
> /*
> * Hmm...can't get PCI-Express capabilities.
> * Falling back to default method.
> */
> bus_info_valid = FALSE;
> ixgbe_get_bus_info(hw);
> goto display;
> }
> /* ...and read the Link Status Register */
> link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
> ixgbe_set_pci_config_data_generic(hw, link);
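The Link Status Register value read above is decoded by ixgbe_set_pci_config_data_generic(); as a rough illustration (sketch only, not from the driver), the standard PCIe layout keeps the current link speed in bits 3:0 and the negotiated width in bits 9:4:

/* Sketch: decode a PCIe Link Status Register value. */
#include <stdint.h>
#include <stdio.h>

static void
decode_link_status(uint16_t link)
{
	unsigned speed = link & 0xF;		/* 1=2.5GT/s, 2=5GT/s, 3=8GT/s */
	unsigned width = (link >> 4) & 0x3F;	/* number of lanes */

	printf("speed code %u, x%u lanes\n", speed, width);
}

int
main(void)
{
	decode_link_status(0x0083);	/* hypothetical value: 8 lanes, 8.0 GT/s */
	return (0);
}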
1407,1411c1859,1868
< /*
< **
< ** MSIX Interrupt Handlers and Tasklets
< **
< */
---
> display:
> device_printf(dev, "PCI Express Bus: Speed %s %s\n",
> ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
> (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
> (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
> "Unknown"),
> ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
> (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
> (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
> "Unknown"));
1412a1870,1891
> if (bus_info_valid) {
> if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
> ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
> (hw->bus.speed == ixgbe_bus_speed_2500))) {
> device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
> device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
> }
> if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
> ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
> (hw->bus.speed < ixgbe_bus_speed_8000))) {
> device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
> device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
> }
> } else
> device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
>
> return;
> } /* ixgbe_get_slot_info */
>
> /************************************************************************
> * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
> ************************************************************************/
1417,1418c1896,1897
< u64 queue = (u64)(1 << vector);
< u32 mask;
---
> u64 queue = (u64)(1 << vector);
> u32 mask;
1421,1422c1900,1901
< mask = (IXGBE_EIMS_RTX_QUEUE & queue);
< IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
---
> mask = (IXGBE_EIMS_RTX_QUEUE & queue);
> IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1424,1429c1903,1908
< mask = (queue & 0xFFFFFFFF);
< if (mask)
< IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
< mask = (queue >> 32);
< if (mask)
< IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
---
> mask = (queue & 0xFFFFFFFF);
> if (mask)
> IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
> mask = (queue >> 32);
> if (mask)
> IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1431c1910
< }
---
> } /* ixgbe_enable_queue */
1432a1912,1914
> /************************************************************************
> * ixgbe_disable_queue
> ************************************************************************/
1437,1438c1919,1920
< u64 queue = (u64)(1 << vector);
< u32 mask;
---
> u64 queue = (u64)(1 << vector);
> u32 mask;
1441,1442c1923,1924
< mask = (IXGBE_EIMS_RTX_QUEUE & queue);
< IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
---
> mask = (IXGBE_EIMS_RTX_QUEUE & queue);
> IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1444,1449c1926,1931
< mask = (queue & 0xFFFFFFFF);
< if (mask)
< IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
< mask = (queue >> 32);
< if (mask)
< IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
---
> mask = (queue & 0xFFFFFFFF);
> if (mask)
> IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
> mask = (queue >> 32);
> if (mask)
> IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1451c1933
< }
---
> } /* ixgbe_disable_queue */
1453,1552c1935,1937
< static void
< ixgbe_handle_que(void *context, int pending)
< {
< struct ix_queue *que = context;
< struct adapter *adapter = que->adapter;
< struct tx_ring *txr = que->txr;
< struct ifnet *ifp = adapter->ifp;
<
< if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
< ixgbe_rxeof(que);
< IXGBE_TX_LOCK(txr);
< ixgbe_txeof(txr);
< #ifndef IXGBE_LEGACY_TX
< if (!drbr_empty(ifp, txr->br))
< ixgbe_mq_start_locked(ifp, txr);
< #else
< if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
< ixgbe_start_locked(txr, ifp);
< #endif
< IXGBE_TX_UNLOCK(txr);
< }
<
< /* Reenable this interrupt */
< if (que->res != NULL)
< ixgbe_enable_queue(adapter, que->msix);
< else
< ixgbe_enable_intr(adapter);
< return;
< }
<
<
< /*********************************************************************
< *
< * Legacy Interrupt Service routine
< *
< **********************************************************************/
<
< static void
< ixgbe_legacy_irq(void *arg)
< {
< struct ix_queue *que = arg;
< struct adapter *adapter = que->adapter;
< struct ixgbe_hw *hw = &adapter->hw;
< struct ifnet *ifp = adapter->ifp;
< struct tx_ring *txr = adapter->tx_rings;
< bool more;
< u32 reg_eicr;
<
<
< reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
<
< ++que->irqs;
< if (reg_eicr == 0) {
< ixgbe_enable_intr(adapter);
< return;
< }
<
< more = ixgbe_rxeof(que);
<
< IXGBE_TX_LOCK(txr);
< ixgbe_txeof(txr);
< #ifdef IXGBE_LEGACY_TX
< if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
< ixgbe_start_locked(txr, ifp);
< #else
< if (!drbr_empty(ifp, txr->br))
< ixgbe_mq_start_locked(ifp, txr);
< #endif
< IXGBE_TX_UNLOCK(txr);
<
< /* Check for fan failure */
< if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
< (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
< device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
< "REPLACE IMMEDIATELY!!\n");
< IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
< }
<
< /* Link status change */
< if (reg_eicr & IXGBE_EICR_LSC)
< taskqueue_enqueue(adapter->tq, &adapter->link_task);
<
< /* External PHY interrupt */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
< (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
< taskqueue_enqueue(adapter->tq, &adapter->phy_task);
<
< if (more)
< taskqueue_enqueue(que->tq, &que->que_task);
< else
< ixgbe_enable_intr(adapter);
< return;
< }
<
<
< /*********************************************************************
< *
< * MSIX Queue Interrupt Service routine
< *
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
> ************************************************************************/
1556c1941
< struct ix_queue *que = arg;
---
> struct ix_queue *que = arg;
1559,1562c1944,1947
< struct tx_ring *txr = que->txr;
< struct rx_ring *rxr = que->rxr;
< bool more;
< u32 newitr = 0;
---
> struct tx_ring *txr = que->txr;
> struct rx_ring *rxr = que->rxr;
> bool more;
> u32 newitr = 0;
1576,1582c1961,1962
< #ifdef IXGBE_LEGACY_TX
< if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
< ixgbe_start_locked(txr, ifp);
< #else
< if (!drbr_empty(ifp, txr->br))
< ixgbe_mq_start_locked(ifp, txr);
< #endif
---
> if (!ixgbe_ring_empty(ifp, txr->br))
> ixgbe_start_locked(ifp, txr);
1590,1599c1970,1977
< ** Do Adaptive Interrupt Moderation:
< ** - Write out last calculated setting
< ** - Calculate based on average size over
< ** the last interval.
< */
< if (que->eitr_setting)
< IXGBE_WRITE_REG(&adapter->hw,
< IXGBE_EITR(que->msix), que->eitr_setting);
<
< que->eitr_setting = 0;
---
> * Do Adaptive Interrupt Moderation:
> * - Write out last calculated setting
> * - Calculate based on average size over
> * the last interval.
> */
> if (que->eitr_setting)
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
> que->eitr_setting);
1601,1604c1979,1984
< /* Idle, do nothing */
< if ((txr->bytes == 0) && (rxr->bytes == 0))
< goto no_calc;
<
---
> que->eitr_setting = 0;
>
> /* Idle, do nothing */
> if ((txr->bytes == 0) && (rxr->bytes == 0))
> goto no_calc;
>
1606c1986
< newitr = txr->bytes/txr->packets;
---
> newitr = txr->bytes/txr->packets;
1608,1609c1988
< newitr = max(newitr,
< (rxr->bytes / rxr->packets));
---
> newitr = max(newitr, (rxr->bytes / rxr->packets));
1621,1627c2000,2003
< if (adapter->hw.mac.type == ixgbe_mac_82598EB)
< newitr |= newitr << 16;
< else
< newitr |= IXGBE_EITR_CNT_WDIS;
<
< /* save for next interrupt */
< que->eitr_setting = newitr;
---
> if (adapter->hw.mac.type == ixgbe_mac_82598EB)
> newitr |= newitr << 16;
> else
> newitr |= IXGBE_EITR_CNT_WDIS;
1629,1633c2005,2006
< /* Reset state */
< txr->bytes = 0;
< txr->packets = 0;
< rxr->bytes = 0;
< rxr->packets = 0;
---
> /* save for next interrupt */
> que->eitr_setting = newitr;
1634a2008,2013
> /* Reset state */
> txr->bytes = 0;
> txr->packets = 0;
> rxr->bytes = 0;
> rxr->packets = 0;
>
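The adaptive moderation above averages bytes per packet over the last interval and derives a new EITR setting; the driver's exact thresholds are elided from this hunk, so the cutoffs in the following standalone sketch are purely illustrative.

/* Sketch: pick an interrupt rate from average frame size (hypothetical buckets). */
#include <stdint.h>
#include <stdio.h>

static uint32_t
pick_itr(uint64_t bytes, uint64_t packets)
{
	uint32_t avg;

	if (packets == 0)
		return (0);		/* idle: leave the setting alone */
	avg = (uint32_t)(bytes / packets);
	/* Hypothetical buckets: small frames -> higher interrupt rate. */
	if (avg < 256)
		return (8000);
	else if (avg < 1024)
		return (4000);
	return (2000);
}

int
main(void)
{
	printf("new rate: %u ints/s\n", pick_itr(1500000, 1000));
	return (0);
}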
1640,1641d2018
< return;
< }
1643,1731d2019
<
< static void
< ixgbe_msix_link(void *arg)
< {
< struct adapter *adapter = arg;
< struct ixgbe_hw *hw = &adapter->hw;
< u32 reg_eicr, mod_mask;
<
< ++adapter->link_irq;
<
< /* Pause other interrupts */
< IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
<
< /* First get the cause */
< reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
< /* Be sure the queue bits are not cleared */
< reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
< /* Clear interrupt with write */
< IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
<
< /* Link status change */
< if (reg_eicr & IXGBE_EICR_LSC) {
< IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
< taskqueue_enqueue(adapter->tq, &adapter->link_task);
< }
<
< if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
< #ifdef IXGBE_FDIR
< if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
< /* This is probably overkill :) */
< if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
< return;
< /* Disable the interrupt */
< IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
< taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
< } else
< #endif
< if (reg_eicr & IXGBE_EICR_ECC) {
< device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
< "Please Reboot!!\n");
< IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
< }
<
< /* Check for over temp condition */
< if (reg_eicr & IXGBE_EICR_TS) {
< device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
< "PHY IS SHUT DOWN!!\n");
< device_printf(adapter->dev, "System shutdown required!\n");
< IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
< }
< #ifdef PCI_IOV
< if (reg_eicr & IXGBE_EICR_MAILBOX)
< taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
< #endif
< }
<
< /* Pluggable optics-related interrupt */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
< mod_mask = IXGBE_EICR_GPI_SDP0_X540;
< else
< mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
<
< if (ixgbe_is_sfp(hw)) {
< if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
< IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
< taskqueue_enqueue(adapter->tq, &adapter->msf_task);
< } else if (reg_eicr & mod_mask) {
< IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
< taskqueue_enqueue(adapter->tq, &adapter->mod_task);
< }
< }
<
< /* Check for fan failure */
< if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
< (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
< IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
< device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
< "REPLACE IMMEDIATELY!!\n");
< }
<
< /* External PHY interrupt */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
< (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
< IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
< taskqueue_enqueue(adapter->tq, &adapter->phy_task);
< }
<
< /* Re-enable other interrupts */
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1733c2021
< }
---
> } /* ixgbe_msix_que */
1735c2023,2024
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_media_status - Media Ioctl callback
1737,1742c2026,2028
< * Media Ioctl callback
< *
< * This routine is called whenever the user queries the status of
< * the interface using ifconfig.
< *
< **********************************************************************/
---
> * Called whenever the user queries the status of
> * the interface using ifconfig.
> ************************************************************************/
1744c2030
< ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
---
> ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1746c2032
< struct adapter *adapter = ifp->if_softc;
---
> struct adapter *adapter = ifp->if_softc;
1748c2034
< int layer;
---
> int layer;
1767c2053,2054
< layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
---
> layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
> layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1777a2065,2067
> case IXGBE_LINK_SPEED_10_FULL:
> ifmr->ifm_active |= IFM_10_T | IFM_FDX;
> break;
1821,1823c2111,2113
< ** XXX: These need to use the proper media types once
< ** they're added.
< */
---
> * XXX: These need to use the proper media types once
> * they're added.
> */
1837,1838c2127,2129
< else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
< || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
---
> else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
> layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
> layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1863,1864c2154,2156
< else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
< || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
---
> else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
> layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
> layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1877c2169
<
---
>
1881c2173
<
---
>
1895c2187
< }
---
> } /* ixgbe_media_status */
1897c2189,2190
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_media_change - Media Ioctl callback
1899,1904c2192,2194
< * Media Ioctl callback
< *
< * This routine is called when the user changes speed/duplex using
< * media/mediopt option with ifconfig.
< *
< **********************************************************************/
---
> * Called when the user changes speed/duplex using
> * media/mediopt option with ifconfig.
> ************************************************************************/
1906c2196
< ixgbe_media_change(struct ifnet * ifp)
---
> ixgbe_media_change(struct ifnet *ifp)
1908,1910c2198,2200
< struct adapter *adapter = ifp->if_softc;
< struct ifmedia *ifm = &adapter->media;
< struct ixgbe_hw *hw = &adapter->hw;
---
> struct adapter *adapter = ifp->if_softc;
> struct ifmedia *ifm = &adapter->media;
> struct ixgbe_hw *hw = &adapter->hw;
1922,1926c2212,2215
< ** We don't actually need to check against the supported
< ** media types of the adapter; ifmedia will take care of
< ** that for us.
< */
< #ifndef IFM_ETH_XTYPE
---
> * We don't actually need to check against the supported
> * media types of the adapter; ifmedia will take care of
> * that for us.
> */
1930a2220,2222
> speed |= IXGBE_LINK_SPEED_1GB_FULL;
> speed |= IXGBE_LINK_SPEED_10GB_FULL;
> break;
1932d2223
< case IFM_10G_SR: /* KR, too */
1933a2225,2226
> #ifndef IFM_ETH_XTYPE
> case IFM_10G_SR: /* KR, too */
1934a2228,2231
> #else
> case IFM_10G_KR:
> case IFM_10G_KX4:
> #endif
1936d2232
< case IFM_10G_TWINAX:
1939,1940c2235,2239
< case IFM_1000_T:
< speed |= IXGBE_LINK_SPEED_100_FULL;
---
> #ifndef IFM_ETH_XTYPE
> case IFM_1000_CX: /* KX */
> #else
> case IFM_1000_KX:
> #endif
1943d2241
< case IFM_1000_CX: /* KX */
1946c2244
< case IFM_100_TX:
---
> case IFM_1000_T:
1948,1960d2245
< break;
< default:
< goto invalid;
< }
< #else
< switch (IFM_SUBTYPE(ifm->ifm_media)) {
< case IFM_AUTO:
< case IFM_10G_T:
< speed |= IXGBE_LINK_SPEED_100_FULL;
< case IFM_10G_LRM:
< case IFM_10G_KR:
< case IFM_10G_LR:
< case IFM_10G_KX4:
1961a2247
> break;
1965,1971d2250
< case IFM_1000_T:
< speed |= IXGBE_LINK_SPEED_100_FULL;
< case IFM_1000_LX:
< case IFM_1000_SX:
< case IFM_1000_KX:
< speed |= IXGBE_LINK_SPEED_1GB_FULL;
< break;
1974a2254,2256
> case IFM_10_T:
> speed |= IXGBE_LINK_SPEED_10_FULL;
> break;
1978d2259
< #endif
1982,1991c2263,2267
< if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
< adapter->advertise = 0;
< } else {
< if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
< adapter->advertise |= 1 << 2;
< if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
< adapter->advertise |= 1 << 1;
< if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
< adapter->advertise |= 1 << 0;
< }
---
> adapter->advertise =
> ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
> ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
> ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
> ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
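The advertise mask built above packs the requested speeds into low-order bits (1 = 100M, 2 = 1G, 4 = 10G, 8 = 10M). A small sketch, illustrative only, that decodes such a mask:

/* Sketch: decode the advertise bitmask layout used above. */
#include <stdio.h>

#define ADV_100M 0x1
#define ADV_1G   0x2
#define ADV_10G  0x4
#define ADV_10M  0x8

static void
print_advertise(int adv)
{
	printf("advertised:%s%s%s%s\n",
	    (adv & ADV_10G)  ? " 10G"  : "",
	    (adv & ADV_1G)   ? " 1G"   : "",
	    (adv & ADV_100M) ? " 100M" : "",
	    (adv & ADV_10M)  ? " 10M"  : "");
}

int
main(void)
{
	print_advertise(ADV_10G | ADV_1G);	/* -> "advertised: 10G 1G" */
	return (0);
}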
1996a2273
>
1998c2275
< }
---
> } /* ixgbe_media_change */
1999a2277,2279
> /************************************************************************
> * ixgbe_set_promisc
> ************************************************************************/
2003,2005c2283,2285
< u_int32_t reg_rctl;
< struct ifnet *ifp = adapter->ifp;
< int mcnt = 0;
---
> struct ifnet *ifp = adapter->ifp;
> int mcnt = 0;
> u32 rctl;
2007,2008c2287,2288
< reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
< reg_rctl &= (~IXGBE_FCTRL_UPE);
---
> rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
> rctl &= (~IXGBE_FCTRL_UPE);
2012c2292
< struct ifmultiaddr *ifma;
---
> struct ifmultiaddr *ifma;
2032,2033c2312,2313
< reg_rctl &= (~IXGBE_FCTRL_MPE);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
---
> rctl &= (~IXGBE_FCTRL_MPE);
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2036,2037c2316,2317
< reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
---
> rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2039,2041c2319,2321
< reg_rctl |= IXGBE_FCTRL_MPE;
< reg_rctl &= ~IXGBE_FCTRL_UPE;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
---
> rctl |= IXGBE_FCTRL_MPE;
> rctl &= ~IXGBE_FCTRL_UPE;
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2043,2044c2323
< return;
< }
---
> } /* ixgbe_set_promisc */
2046,2054c2325,2327
<
< /*********************************************************************
< * Multicast Update
< *
< * This routine is called whenever multicast address list is updated.
< *
< **********************************************************************/
< #define IXGBE_RAR_ENTRIES 16
<
---
> /************************************************************************
> * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
> ************************************************************************/
2056c2329
< ixgbe_set_multi(struct adapter *adapter)
---
> ixgbe_msix_link(void *arg)
2058,2063c2331,2334
< u32 fctrl;
< u8 *update_ptr;
< struct ifmultiaddr *ifma;
< struct ixgbe_mc_addr *mta;
< int mcnt = 0;
< struct ifnet *ifp = adapter->ifp;
---
> struct adapter *adapter = arg;
> struct ixgbe_hw *hw = &adapter->hw;
> u32 eicr, eicr_mask;
> s32 retval;
2065c2336
< IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
---
> ++adapter->link_irq;
2067,2068c2338,2339
< mta = adapter->mta;
< bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
---
> /* Pause other interrupts */
> IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2070,2089c2341,2346
< #if __FreeBSD_version < 800000
< IF_ADDR_LOCK(ifp);
< #else
< if_maddr_rlock(ifp);
< #endif
< TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
< if (ifma->ifma_addr->sa_family != AF_LINK)
< continue;
< if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
< break;
< bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
< mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
< mta[mcnt].vmdq = adapter->pool;
< mcnt++;
< }
< #if __FreeBSD_version < 800000
< IF_ADDR_UNLOCK(ifp);
< #else
< if_maddr_runlock(ifp);
< #endif
---
> /* First get the cause */
> eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
> /* Be sure the queue bits are not cleared */
> eicr &= ~IXGBE_EICR_RTX_QUEUE;
> /* Clear interrupt with write */
> IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2091,2107c2348,2351
< fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
< fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
< if (ifp->if_flags & IFF_PROMISC)
< fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
< else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
< ifp->if_flags & IFF_ALLMULTI) {
< fctrl |= IXGBE_FCTRL_MPE;
< fctrl &= ~IXGBE_FCTRL_UPE;
< } else
< fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
<
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
<
< if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
< update_ptr = (u8 *)mta;
< ixgbe_update_mc_addr_list(&adapter->hw,
< update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
---
> /* Link status change */
> if (eicr & IXGBE_EICR_LSC) {
> IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
> taskqueue_enqueue(adapter->tq, &adapter->link_task);
2110,2111c2354,2363
< return;
< }
---
> if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
> if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
> (eicr & IXGBE_EICR_FLOW_DIR)) {
> /* This is probably overkill :) */
> if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
> return;
> /* Disable the interrupt */
> IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
> taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
> }
2113,2121c2365,2369
< /*
< * This is an iterator function now needed by the multicast
< * shared code. It simply feeds the shared code routine the
< * addresses in the array of ixgbe_set_multi() one by one.
< */
< static u8 *
< ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
< {
< struct ixgbe_mc_addr *mta;
---
> if (eicr & IXGBE_EICR_ECC) {
> device_printf(adapter->dev,
> "CRITICAL: ECC ERROR!! Please Reboot!!\n");
> IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
> }
2123,2124c2371,2398
< mta = (struct ixgbe_mc_addr *)*update_ptr;
< *vmdq = mta->vmdq;
---
> /* Check for over temp condition */
> if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
> switch (adapter->hw.mac.type) {
> case ixgbe_mac_X550EM_a:
> if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
> break;
> IXGBE_WRITE_REG(hw, IXGBE_EIMC,
> IXGBE_EICR_GPI_SDP0_X550EM_a);
> IXGBE_WRITE_REG(hw, IXGBE_EICR,
> IXGBE_EICR_GPI_SDP0_X550EM_a);
> retval = hw->phy.ops.check_overtemp(hw);
> if (retval != IXGBE_ERR_OVERTEMP)
> break;
> device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
> device_printf(adapter->dev, "System shutdown required!\n");
> break;
> default:
> if (!(eicr & IXGBE_EICR_TS))
> break;
> retval = hw->phy.ops.check_overtemp(hw);
> if (retval != IXGBE_ERR_OVERTEMP)
> break;
> device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
> device_printf(adapter->dev, "System shutdown required!\n");
> IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
> break;
> }
> }
2126,2128c2400,2404
< *update_ptr = (u8*)(mta + 1);
< return (mta->addr);
< }
---
> /* Check for VF message */
> if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
> (eicr & IXGBE_EICR_MAILBOX))
> taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
> }
2129a2406,2411
> if (ixgbe_is_sfp(hw)) {
> /* Pluggable optics-related interrupt */
> if (hw->mac.type >= ixgbe_mac_X540)
> eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
> else
> eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2131,2137c2413,2416
< /*********************************************************************
< * Timer routine
< *
< * This routine checks for link status,updates statistics,
< * and runs the watchdog check.
< *
< **********************************************************************/
---
> if (eicr & eicr_mask) {
> IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
> taskqueue_enqueue(adapter->tq, &adapter->mod_task);
> }
2139,2180c2418,2422
< static void
< ixgbe_local_timer(void *arg)
< {
< struct adapter *adapter = arg;
< device_t dev = adapter->dev;
< struct ix_queue *que = adapter->queues;
< u64 queues = 0;
< int hung = 0;
<
< mtx_assert(&adapter->core_mtx, MA_OWNED);
<
< /* Check for pluggable optics */
< if (adapter->sfp_probe)
< if (!ixgbe_sfp_probe(adapter))
< goto out; /* Nothing to do */
<
< ixgbe_update_link_status(adapter);
< ixgbe_update_stats_counters(adapter);
<
< /*
< ** Check the TX queues status
< ** - mark hung queues so we don't schedule on them
< ** - watchdog only if all queues show hung
< */
< for (int i = 0; i < adapter->num_queues; i++, que++) {
< /* Keep track of queues with work for soft irq */
< if (que->txr->busy)
< queues |= ((u64)1 << que->me);
< /*
< ** Each time txeof runs without cleaning but there
< ** are uncleaned descriptors, it increments busy. If
< ** we reach the MAX we declare the queue hung.
< */
< if (que->busy == IXGBE_QUEUE_HUNG) {
< ++hung;
< /* Mark the queue as inactive */
< adapter->active_queues &= ~((u64)1 << que->me);
< continue;
< } else {
< /* Check if we've come back from hung */
< if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
< adapter->active_queues |= ((u64)1 << que->me);
---
> if ((hw->mac.type == ixgbe_mac_82599EB) &&
> (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
> IXGBE_WRITE_REG(hw, IXGBE_EICR,
> IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
> taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2182,2187c2424
< if (que->busy >= IXGBE_MAX_TX_BUSY) {
< device_printf(dev,"Warning queue %d "
< "appears to be hung!\n", i);
< que->txr->busy = IXGBE_QUEUE_HUNG;
< ++hung;
< }
---
> }
2188a2426,2429
> /* Check for fan failure */
> if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
> ixgbe_check_fan_failure(adapter, eicr, TRUE);
> IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2191,2195c2432,2436
< /* Only truly watchdog if all queues show hung */
< if (hung == adapter->num_queues)
< goto watchdog;
< else if (queues != 0) { /* Force an IRQ on queues with work */
< ixgbe_rearm_queues(adapter, queues);
---
> /* External PHY interrupt */
> if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
> (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
> IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
> taskqueue_enqueue(adapter->tq, &adapter->phy_task);
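
The removed timer code above implements the watchdog policy: remember which queues still have work so their interrupts can be re-armed, and only reset the interface when every queue reports hung. A simplified sketch of that decision, with QUEUE_HUNG standing in for IXGBE_QUEUE_HUNG and a single busy value per queue where the driver actually tracks both que->busy and txr->busy.

#include <stdint.h>
#include <stdio.h>

#define NQUEUES		4
#define QUEUE_HUNG	0x80000000u	/* stand-in for IXGBE_QUEUE_HUNG */

/*
 * Collect a bitmask of queues that still have work (so their interrupts can
 * be re-armed) and count hung queues; report a watchdog timeout only when
 * every queue is hung.
 */
static int check_queues(const uint32_t busy[NQUEUES], uint64_t *rearm_mask)
{
	int hung = 0;

	*rearm_mask = 0;
	for (int i = 0; i < NQUEUES; i++) {
		if (busy[i])			/* work still pending */
			*rearm_mask |= (uint64_t)1 << i;
		if (busy[i] == QUEUE_HUNG)	/* cleaning made no progress */
			hung++;
	}
	return (hung == NQUEUES);
}

int main(void)
{
	uint32_t busy[NQUEUES] = { 1, 0, QUEUE_HUNG, 2 };
	uint64_t rearm;

	printf("watchdog=%d rearm=0x%llx\n",
	    check_queues(busy, &rearm), (unsigned long long)rearm);
	return 0;
}
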
2198,2200c2439,2441
< out:
< callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
< return;
---
> /* Re-enable other interrupts */
> IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
> } /* ixgbe_msix_link */
2202,2216c2443,2447
< watchdog:
< device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
< adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
< adapter->watchdog_events++;
< ixgbe_init_locked(adapter);
< }
<
<
< /*
< ** Note: this routine updates the OS on the link state;
< ** the real check of the hardware only happens with
< ** a link interrupt.
< */
< static void
< ixgbe_update_link_status(struct adapter *adapter)
---
> /************************************************************************
> * ixgbe_sysctl_interrupt_rate_handler
> ************************************************************************/
> static int
> ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2218,2219c2449,2451
< struct ifnet *ifp = adapter->ifp;
< device_t dev = adapter->dev;
---
> struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
> int error;
> unsigned int reg, usec, rate;
2221,2246c2453,2468
< if (adapter->link_up){
< if (adapter->link_active == FALSE) {
< if (bootverbose)
< device_printf(dev,"Link is up %d Gbps %s \n",
< ((adapter->link_speed == 128)? 10:1),
< "Full Duplex");
< adapter->link_active = TRUE;
< /* Update any Flow Control changes */
< ixgbe_fc_enable(&adapter->hw);
< /* Update DMA coalescing config */
< ixgbe_config_dmac(adapter);
< if_link_state_change(ifp, LINK_STATE_UP);
< #ifdef PCI_IOV
< ixgbe_ping_all_vfs(adapter);
< #endif
< }
< } else { /* Link down */
< if (adapter->link_active == TRUE) {
< if (bootverbose)
< device_printf(dev,"Link is Down\n");
< if_link_state_change(ifp, LINK_STATE_DOWN);
< adapter->link_active = FALSE;
< #ifdef PCI_IOV
< ixgbe_ping_all_vfs(adapter);
< #endif
< }
---
> reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
> usec = ((reg & 0x0FF8) >> 3);
> if (usec > 0)
> rate = 500000 / usec;
> else
> rate = 0;
> error = sysctl_handle_int(oidp, &rate, 0, req);
> if (error || !req->newptr)
> return error;
> reg &= ~0xfff; /* default, no limitation */
> ixgbe_max_interrupt_rate = 0;
> if (rate > 0 && rate < 500000) {
> if (rate < 1000)
> rate = 1000;
> ixgbe_max_interrupt_rate = rate;
> reg |= ((4000000/rate) & 0xff8);
2247a2470
> IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2249,2250c2472,2473
< return;
< }
---
> return (0);
> } /* ixgbe_sysctl_interrupt_rate_handler */
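
The handler above converts between interrupts per second and the EITR interval field using the 0x0FF8/0xFF8 masks and the 500000/4000000 constants. A minimal standalone sketch of that arithmetic, operating on a plain integer instead of the real register.

#include <stdint.h>
#include <stdio.h>

/* Read path: bits 3..11 of EITR hold the interval; 500000 / usec gives irq/s. */
static unsigned int eitr_reg_to_rate(uint32_t reg)
{
	unsigned int usec = (reg & 0x0FF8) >> 3;

	return (usec > 0) ? 500000 / usec : 0;
}

/* Write path: clear the interval field, then pack (4000000 / rate) back in. */
static uint32_t rate_to_eitr_reg(uint32_t reg, unsigned int rate)
{
	reg &= ~(uint32_t)0xFFF;		/* default: no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)		/* clamp, as the handler does */
			rate = 1000;
		reg |= (4000000 / rate) & 0xFF8;
	}
	return reg;
}

int main(void)
{
	uint32_t reg = rate_to_eitr_reg(0, 8000);

	printf("reg=0x%03x -> %u irq/s\n", (unsigned)reg, eitr_reg_to_rate(reg));
	return 0;
}
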
2252,2259c2475,2477
<
< /*********************************************************************
< *
< * This routine disables all traffic on the adapter by issuing a
< * global reset on the MAC and deallocates TX/RX buffers.
< *
< **********************************************************************/
<
---
> /************************************************************************
> * ixgbe_add_device_sysctls
> ************************************************************************/
2261c2479
< ixgbe_stop(void *arg)
---
> ixgbe_add_device_sysctls(struct adapter *adapter)
2263,2266c2481,2484
< struct ifnet *ifp;
< struct adapter *adapter = arg;
< struct ixgbe_hw *hw = &adapter->hw;
< ifp = adapter->ifp;
---
> device_t dev = adapter->dev;
> struct ixgbe_hw *hw = &adapter->hw;
> struct sysctl_oid_list *child;
> struct sysctl_ctx_list *ctx;
2268c2486,2487
< mtx_assert(&adapter->core_mtx, MA_OWNED);
---
> ctx = device_get_sysctl_ctx(dev);
> child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2270,2272c2489,2491
< INIT_DEBUGOUT("ixgbe_stop: begin\n");
< ixgbe_disable_intr(adapter);
< callout_stop(&adapter->timer);
---
> /* Sysctls for all devices */
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
> adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
2274,2275c2493,2495
< /* Let the stack know...*/
< ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
---
> adapter->enable_aim = ixgbe_enable_aim;
> SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
> &adapter->enable_aim, 1, "Interrupt Moderation");
2277,2283c2497,2499
< ixgbe_reset_hw(hw);
< hw->adapter_stopped = FALSE;
< ixgbe_stop_adapter(hw);
< if (hw->mac.type == ixgbe_mac_82599EB)
< ixgbe_stop_mac_link_on_d3_82599(hw);
< /* Turn off the laser - noop with no optics */
< ixgbe_disable_tx_laser(hw);
---
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
> CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
> IXGBE_SYSCTL_DESC_ADV_SPEED);
2285,2287c2501,2505
< /* Update the stack */
< adapter->link_up = FALSE;
< ixgbe_update_link_status(adapter);
---
> #ifdef IXGBE_DEBUG
> /* testing sysctls (for all devices) */
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
> CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
> "I", "PCI Power State");
2289,2290c2507,2515
< /* reprogram the RAR[0] in case user changed it. */
< ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
---
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
> CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
> ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
> #endif
> /* for X550 series devices */
> if (hw->mac.type >= ixgbe_mac_X550)
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
> CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
> "I", "DMA Coalesce");
2292,2293c2517,2521
< return;
< }
---
> /* for WoL-capable devices */
> if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
> CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
> ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2295,2349c2523,2525
<
< /*********************************************************************
< *
< * Determine hardware revision.
< *
< **********************************************************************/
< static void
< ixgbe_identify_hardware(struct adapter *adapter)
< {
< device_t dev = adapter->dev;
< struct ixgbe_hw *hw = &adapter->hw;
<
< /* Save off the information about this board */
< hw->vendor_id = pci_get_vendor(dev);
< hw->device_id = pci_get_device(dev);
< hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
< hw->subsystem_vendor_id =
< pci_read_config(dev, PCIR_SUBVEND_0, 2);
< hw->subsystem_device_id =
< pci_read_config(dev, PCIR_SUBDEV_0, 2);
<
< /*
< ** Make sure BUSMASTER is set
< */
< pci_enable_busmaster(dev);
<
< /* We need this here to set the num_segs below */
< ixgbe_set_mac_type(hw);
<
< /* Pick up the 82599 settings */
< if (hw->mac.type != ixgbe_mac_82598EB) {
< hw->phy.smart_speed = ixgbe_smart_speed;
< adapter->num_segs = IXGBE_82599_SCATTER;
< } else
< adapter->num_segs = IXGBE_82598_SCATTER;
<
< return;
< }
<
< /*********************************************************************
< *
< * Determine optic type
< *
< **********************************************************************/
< static void
< ixgbe_setup_optics(struct adapter *adapter)
< {
< struct ixgbe_hw *hw = &adapter->hw;
< int layer;
<
< layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
<
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
< adapter->optics = IFM_10G_T;
< return;
---
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
> CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
> "I", "Enable/Disable Wake Up Filters");
2352,2355c2528,2531
< if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
< adapter->optics = IFM_1000_T;
< return;
< }
---
> /* for X552/X557-AT devices */
> if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
> struct sysctl_oid *phy_node;
> struct sysctl_oid_list *phy_list;
2357,2360c2533,2535
< if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
< adapter->optics = IFM_1000_SX;
< return;
< }
---
> phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
> CTLFLAG_RD, NULL, "External PHY sysctls");
> phy_list = SYSCTL_CHILDREN(phy_node);
2362,2366c2537,2539
< if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
< IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
< adapter->optics = IFM_10G_LR;
< return;
< }
---
> SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
> CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
> "I", "Current External PHY Temperature (Celsius)");
2368,2370c2541,2544
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
< adapter->optics = IFM_10G_SR;
< return;
---
> SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
> CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
> ixgbe_sysctl_phy_overtemp_occurred, "I",
> "External PHY High Temperature Event Occurred");
2373,2375c2547,2550
< if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
< adapter->optics = IFM_10G_TWINAX;
< return;
---
> if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
> SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
> CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
> ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2376a2552
> } /* ixgbe_add_device_sysctls */
2378,2393c2554,2556
< if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
< IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
< adapter->optics = IFM_10G_CX4;
< return;
< }
<
< /* If we get here just set the default */
< adapter->optics = IFM_ETHER | IFM_AUTO;
< return;
< }
<
< /*********************************************************************
< *
< * Setup the Legacy or MSI Interrupt handler
< *
< **********************************************************************/
---
> /************************************************************************
> * ixgbe_allocate_pci_resources
> ************************************************************************/
2395c2558
< ixgbe_allocate_legacy(struct adapter *adapter)
---
> ixgbe_allocate_pci_resources(struct adapter *adapter)
2397,2402c2560,2561
< device_t dev = adapter->dev;
< struct ix_queue *que = adapter->queues;
< #ifndef IXGBE_LEGACY_TX
< struct tx_ring *txr = adapter->tx_rings;
< #endif
< int error, rid = 0;
---
> device_t dev = adapter->dev;
> int rid;
2404,2406c2563,2565
< /* MSI RID at 1 */
< if (adapter->msix == 1)
< rid = 1;
---
> rid = PCIR_BAR(0);
> adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
> RF_ACTIVE);
2408,2413c2567,2568
< /* We allocate a single interrupt resource */
< adapter->res = bus_alloc_resource_any(dev,
< SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
< if (adapter->res == NULL) {
< device_printf(dev, "Unable to allocate bus resource: "
< "interrupt\n");
---
> if (!(adapter->pci_mem)) {
> device_printf(dev, "Unable to allocate bus resource: memory\n");
2417,2428c2572,2577
< /*
< * Try allocating a fast interrupt and the associated deferred
< * processing contexts.
< */
< #ifndef IXGBE_LEGACY_TX
< TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
< #endif
< TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
< que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
< taskqueue_thread_enqueue, &que->tq);
< taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
< device_get_nameunit(adapter->dev));
---
> /* Save bus_space values for READ/WRITE_REG macros */
> adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
> adapter->osdep.mem_bus_space_handle =
> rman_get_bushandle(adapter->pci_mem);
> /* Set hw values for shared code */
> adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2430,2456d2578
< /* Tasklets for Link, SFP and Multispeed Fiber */
< TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
< TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
< TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
< TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
< #ifdef IXGBE_FDIR
< TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
< #endif
< adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
< taskqueue_thread_enqueue, &adapter->tq);
< taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
< device_get_nameunit(adapter->dev));
<
< if ((error = bus_setup_intr(dev, adapter->res,
< INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
< que, &adapter->tag)) != 0) {
< device_printf(dev, "Failed to register fast interrupt "
< "handler: %d\n", error);
< taskqueue_free(que->tq);
< taskqueue_free(adapter->tq);
< que->tq = NULL;
< adapter->tq = NULL;
< return (error);
< }
< /* For simplicity in the handlers */
< adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
<
2458c2580
< }
---
> } /* ixgbe_allocate_pci_resources */
2460,2461c2582,2583
<
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_detach - Device removal routine
2463c2585,2587
< * Setup MSIX Interrupt resources and handlers
---
> * Called when the driver is being removed.
> * Stops the adapter and deallocates all the resources
> * that were allocated for driver operation.
2465c2589,2590
< **********************************************************************/
---
> * return 0 on success, positive on failure
> ************************************************************************/
2467c2592
< ixgbe_allocate_msix(struct adapter *adapter)
---
> ixgbe_detach(device_t dev)
2469,2476c2594,2597
< device_t dev = adapter->dev;
< struct ix_queue *que = adapter->queues;
< struct tx_ring *txr = adapter->tx_rings;
< int error, rid, vector = 0;
< int cpu_id = 0;
< #ifdef RSS
< cpuset_t cpu_mask;
< #endif
---
> struct adapter *adapter = device_get_softc(dev);
> struct ix_queue *que = adapter->queues;
> struct tx_ring *txr = adapter->tx_rings;
> u32 ctrl_ext;
2478,2496c2599,2604
< #ifdef RSS
< /*
< * If we're doing RSS, the number of queues needs to
< * match the number of RSS buckets that are configured.
< *
< * + If there's more queues than RSS buckets, we'll end
< * up with queues that get no traffic.
< *
< * + If there's more RSS buckets than queues, we'll end
< * up having multiple RSS buckets map to the same queue,
< * so there'll be some contention.
< */
< if (adapter->num_queues != rss_getnumbuckets()) {
< device_printf(dev,
< "%s: number of queues (%d) != number of RSS buckets (%d)"
< "; performance will be impacted.\n",
< __func__,
< adapter->num_queues,
< rss_getnumbuckets());
---
> INIT_DEBUGOUT("ixgbe_detach: begin");
>
> /* Make sure VLANS are not using driver */
> if (adapter->ifp->if_vlantrunk != NULL) {
> device_printf(dev, "Vlan in use, detach first\n");
> return (EBUSY);
2498d2605
< #endif
2500,2553c2607,2610
< for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
< rid = vector + 1;
< que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
< RF_SHAREABLE | RF_ACTIVE);
< if (que->res == NULL) {
< device_printf(dev,"Unable to allocate"
< " bus resource: que interrupt [%d]\n", vector);
< return (ENXIO);
< }
< /* Set the handler function */
< error = bus_setup_intr(dev, que->res,
< INTR_TYPE_NET | INTR_MPSAFE, NULL,
< ixgbe_msix_que, que, &que->tag);
< if (error) {
< que->res = NULL;
< device_printf(dev, "Failed to register QUE handler");
< return (error);
< }
< #if __FreeBSD_version >= 800504
< bus_describe_intr(dev, que->res, que->tag, "q%d", i);
< #endif
< que->msix = vector;
< adapter->active_queues |= (u64)(1 << que->msix);
< #ifdef RSS
< /*
< * The queue ID is used as the RSS layer bucket ID.
< * We look up the queue ID -> RSS CPU ID and select
< * that.
< */
< cpu_id = rss_getcpu(i % rss_getnumbuckets());
< #else
< /*
< * Bind the msix vector, and thus the
< * rings to the corresponding cpu.
< *
< * This just happens to match the default RSS round-robin
< * bucket -> queue -> CPU allocation.
< */
< if (adapter->num_queues > 1)
< cpu_id = i;
< #endif
< if (adapter->num_queues > 1)
< bus_bind_intr(dev, que->res, cpu_id);
< #ifdef IXGBE_DEBUG
< #ifdef RSS
< device_printf(dev,
< "Bound RSS bucket %d to CPU %d\n",
< i, cpu_id);
< #else
< device_printf(dev,
< "Bound queue %d to cpu %d\n",
< i, cpu_id);
< #endif
< #endif /* IXGBE_DEBUG */
---
> if (ixgbe_pci_iov_detach(dev) != 0) {
> device_printf(dev, "SR-IOV in use; detach first.\n");
> return (EBUSY);
> }
2554a2612,2616
> ether_ifdetach(adapter->ifp);
> /* Stop the adapter */
> IXGBE_CORE_LOCK(adapter);
> ixgbe_setup_low_power_mode(adapter);
> IXGBE_CORE_UNLOCK(adapter);
2556,2572c2618,2624
< #ifndef IXGBE_LEGACY_TX
< TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
< #endif
< TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
< que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
< taskqueue_thread_enqueue, &que->tq);
< #ifdef RSS
< CPU_SETOF(cpu_id, &cpu_mask);
< taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
< &cpu_mask,
< "%s (bucket %d)",
< device_get_nameunit(adapter->dev),
< cpu_id);
< #else
< taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
< device_get_nameunit(adapter->dev), i);
< #endif
---
> for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
> if (que->tq) {
> if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
> taskqueue_drain(que->tq, &txr->txq_task);
> taskqueue_drain(que->tq, &que->que_task);
> taskqueue_free(que->tq);
> }
2575,2582c2627,2637
< /* and Link */
< rid = vector + 1;
< adapter->res = bus_alloc_resource_any(dev,
< SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
< if (!adapter->res) {
< device_printf(dev,"Unable to allocate"
< " bus resource: Link interrupt [%d]\n", rid);
< return (ENXIO);
---
> /* Drain the Link queue */
> if (adapter->tq) {
> taskqueue_drain(adapter->tq, &adapter->link_task);
> taskqueue_drain(adapter->tq, &adapter->mod_task);
> taskqueue_drain(adapter->tq, &adapter->msf_task);
> if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
> taskqueue_drain(adapter->tq, &adapter->mbx_task);
> taskqueue_drain(adapter->tq, &adapter->phy_task);
> if (adapter->feat_en & IXGBE_FEATURE_FDIR)
> taskqueue_drain(adapter->tq, &adapter->fdir_task);
> taskqueue_free(adapter->tq);
2584,2611d2638
< /* Set the link handler function */
< error = bus_setup_intr(dev, adapter->res,
< INTR_TYPE_NET | INTR_MPSAFE, NULL,
< ixgbe_msix_link, adapter, &adapter->tag);
< if (error) {
< adapter->res = NULL;
< device_printf(dev, "Failed to register LINK handler");
< return (error);
< }
< #if __FreeBSD_version >= 800504
< bus_describe_intr(dev, adapter->res, adapter->tag, "link");
< #endif
< adapter->vector = vector;
< /* Tasklets for Link, SFP and Multispeed Fiber */
< TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
< TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
< TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
< #ifdef PCI_IOV
< TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
< #endif
< TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
< #ifdef IXGBE_FDIR
< TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
< #endif
< adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
< taskqueue_thread_enqueue, &adapter->tq);
< taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
< device_get_nameunit(adapter->dev));
2613,2614c2640,2643
< return (0);
< }
---
> /* let hardware know driver is unloading */
> ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
> ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2616,2623c2645,2649
< /*
< * Setup Either MSI/X or MSI
< */
< static int
< ixgbe_setup_msix(struct adapter *adapter)
< {
< device_t dev = adapter->dev;
< int rid, want, queues, msgs;
---
> /* Unregister VLAN events */
> if (adapter->vlan_attach != NULL)
> EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
> if (adapter->vlan_detach != NULL)
> EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2625,2627c2651
< /* Override by tuneable */
< if (ixgbe_enable_msix == 0)
< goto msi;
---
> callout_drain(&adapter->timer);
2629,2646c2653,2654
< /* First try MSI/X */
< msgs = pci_msix_count(dev);
< if (msgs == 0)
< goto msi;
< rid = PCIR_BAR(MSIX_82598_BAR);
< adapter->msix_mem = bus_alloc_resource_any(dev,
< SYS_RES_MEMORY, &rid, RF_ACTIVE);
< if (adapter->msix_mem == NULL) {
< rid += 4; /* 82599 maps in higher BAR */
< adapter->msix_mem = bus_alloc_resource_any(dev,
< SYS_RES_MEMORY, &rid, RF_ACTIVE);
< }
< if (adapter->msix_mem == NULL) {
< /* May not be enabled */
< device_printf(adapter->dev,
< "Unable to map MSIX table \n");
< goto msi;
< }
---
> if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
> netmap_detach(adapter->ifp);
2648,2649c2656,2658
< /* Figure out a reasonable auto config value */
< queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
---
> ixgbe_free_pci_resources(adapter);
> bus_generic_detach(dev);
> if_free(adapter->ifp);
2651,2655c2660,2663
< #ifdef RSS
< /* If we're doing RSS, clamp at the number of RSS buckets */
< if (queues > rss_getnumbuckets())
< queues = rss_getnumbuckets();
< #endif
---
> ixgbe_free_transmit_structures(adapter);
> ixgbe_free_receive_structures(adapter);
> free(adapter->queues, M_DEVBUF);
> free(adapter->mta, M_IXGBE);
2657,2661c2665
< if (ixgbe_num_queues != 0)
< queues = ixgbe_num_queues;
< /* Set max queues to 8 when autoconfiguring */
< else if ((ixgbe_num_queues == 0) && (queues > 8))
< queues = 8;
---
> IXGBE_CORE_LOCK_DESTROY(adapter);
2663,2703d2666
< /* reflect correct sysctl value */
< ixgbe_num_queues = queues;
<
< /*
< ** Want one vector (RX/TX pair) per queue
< ** plus an additional for Link.
< */
< want = queues + 1;
< if (msgs >= want)
< msgs = want;
< else {
< device_printf(adapter->dev,
< "MSIX Configuration Problem, "
< "%d vectors but %d queues wanted!\n",
< msgs, want);
< goto msi;
< }
< if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
< device_printf(adapter->dev,
< "Using MSIX interrupts with %d vectors\n", msgs);
< adapter->num_queues = queues;
< return (msgs);
< }
< /*
< ** If MSIX alloc failed or provided us with
< ** less than needed, free and fall through to MSI
< */
< pci_release_msi(dev);
<
< msi:
< if (adapter->msix_mem != NULL) {
< bus_release_resource(dev, SYS_RES_MEMORY,
< rid, adapter->msix_mem);
< adapter->msix_mem = NULL;
< }
< msgs = 1;
< if (pci_alloc_msi(dev, &msgs) == 0) {
< device_printf(adapter->dev, "Using an MSI interrupt\n");
< return (msgs);
< }
< device_printf(adapter->dev, "Using a Legacy interrupt\n");
2705c2668
< }
---
> } /* ixgbe_detach */
2707c2670,2674
<
---
> /************************************************************************
> * ixgbe_setup_low_power_mode - LPLU/WoL preparation
> *
> * Prepare the adapter/port for LPLU and/or WoL
> ************************************************************************/
2709c2676
< ixgbe_allocate_pci_resources(struct adapter *adapter)
---
> ixgbe_setup_low_power_mode(struct adapter *adapter)
2711c2678
< int rid;
---
> struct ixgbe_hw *hw = &adapter->hw;
2712a2680
> s32 error = 0;
2714,2716c2682
< rid = PCIR_BAR(0);
< adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
< &rid, RF_ACTIVE);
---
> mtx_assert(&adapter->core_mtx, MA_OWNED);
2718,2721c2684,2689
< if (!(adapter->pci_mem)) {
< device_printf(dev, "Unable to allocate bus resource: memory\n");
< return (ENXIO);
< }
---
> /* Limit power management flow to X550EM baseT */
> if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
> hw->phy.ops.enter_lplu) {
> /* Turn off support for APM wakeup. (Using ACPI instead) */
> IXGBE_WRITE_REG(hw, IXGBE_GRC,
> IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2723,2730c2691,2695
< /* Save bus_space values for READ/WRITE_REG macros */
< adapter->osdep.mem_bus_space_tag =
< rman_get_bustag(adapter->pci_mem);
< adapter->osdep.mem_bus_space_handle =
< rman_get_bushandle(adapter->pci_mem);
< /* Set hw values for shared code */
< adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
< adapter->hw.back = adapter;
---
> /*
> * Clear Wake Up Status register to prevent any previous wakeup
> * events from waking us up immediately after we suspend.
> */
> IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2732,2733c2697,2701
< /* Default to 1 queue if MSI-X setup fails */
< adapter->num_queues = 1;
---
> /*
> * Program the Wakeup Filter Control register with user filter
> * settings
> */
> IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2735,2742c2703,2705
< /*
< ** Now setup MSI or MSI-X, should
< ** return us the number of supported
< ** vectors. (Will be 1 for MSI)
< */
< adapter->msix = ixgbe_setup_msix(adapter);
< return (0);
< }
---
> /* Enable wakeups and power management in Wakeup Control */
> IXGBE_WRITE_REG(hw, IXGBE_WUC,
> IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2744,2777c2707,2716
< static void
< ixgbe_free_pci_resources(struct adapter * adapter)
< {
< struct ix_queue *que = adapter->queues;
< device_t dev = adapter->dev;
< int rid, memrid;
<
< if (adapter->hw.mac.type == ixgbe_mac_82598EB)
< memrid = PCIR_BAR(MSIX_82598_BAR);
< else
< memrid = PCIR_BAR(MSIX_82599_BAR);
<
< /*
< ** There is a slight possibility of a failure mode
< ** in attach that will result in entering this function
< ** before interrupt resources have been initialized, and
< ** in that case we do not want to execute the loops below.
< ** We can detect this reliably by the state of the adapter
< ** res pointer.
< */
< if (adapter->res == NULL)
< goto mem;
<
< /*
< ** Release all msix queue resources:
< */
< for (int i = 0; i < adapter->num_queues; i++, que++) {
< rid = que->msix + 1;
< if (que->tag != NULL) {
< bus_teardown_intr(dev, que->res, que->tag);
< que->tag = NULL;
< }
< if (que->res != NULL)
< bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
---
> /* X550EM baseT adapters need a special LPLU flow */
> hw->phy.reset_disable = true;
> ixgbe_stop(adapter);
> error = hw->phy.ops.enter_lplu(hw);
> if (error)
> device_printf(dev, "Error entering LPLU: %d\n", error);
> hw->phy.reset_disable = false;
> } else {
> /* Just stop for other adapters */
> ixgbe_stop(adapter);
2779a2719,2720
> return error;
> } /* ixgbe_setup_low_power_mode */
2781,2785c2722,2729
< /* Clean the Legacy or Link interrupt last */
< if (adapter->vector) /* we are doing MSIX */
< rid = adapter->vector + 1;
< else
< (adapter->msix != 0) ? (rid = 1):(rid = 0);
---
> /************************************************************************
> * ixgbe_shutdown - Shutdown entry point
> ************************************************************************/
> static int
> ixgbe_shutdown(device_t dev)
> {
> struct adapter *adapter = device_get_softc(dev);
> int error = 0;
2787,2792c2731
< if (adapter->tag != NULL) {
< bus_teardown_intr(dev, adapter->res, adapter->tag);
< adapter->tag = NULL;
< }
< if (adapter->res != NULL)
< bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
---
> INIT_DEBUGOUT("ixgbe_shutdown: begin");
2794,2796c2733,2735
< mem:
< if (adapter->msix)
< pci_release_msi(dev);
---
> IXGBE_CORE_LOCK(adapter);
> error = ixgbe_setup_low_power_mode(adapter);
> IXGBE_CORE_UNLOCK(adapter);
2798,2800c2737,2738
< if (adapter->msix_mem != NULL)
< bus_release_resource(dev, SYS_RES_MEMORY,
< memrid, adapter->msix_mem);
---
> return (error);
> } /* ixgbe_shutdown */
2802,2809c2740,2741
< if (adapter->pci_mem != NULL)
< bus_release_resource(dev, SYS_RES_MEMORY,
< PCIR_BAR(0), adapter->pci_mem);
<
< return;
< }
<
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_suspend
2811,2813c2743,2744
< * Setup networking device structure and register an interface.
< *
< **********************************************************************/
---
> * From D0 to D3
> ************************************************************************/
2815c2746
< ixgbe_setup_interface(device_t dev, struct adapter *adapter)
---
> ixgbe_suspend(device_t dev)
2817c2748,2749
< struct ifnet *ifp;
---
> struct adapter *adapter = device_get_softc(dev);
> int error = 0;
2819c2751
< INIT_DEBUGOUT("ixgbe_setup_interface: begin");
---
> INIT_DEBUGOUT("ixgbe_suspend: begin");
2821,2849c2753
< ifp = adapter->ifp = if_alloc(IFT_ETHER);
< if (ifp == NULL) {
< device_printf(dev, "can not allocate ifnet structure\n");
< return (-1);
< }
< if_initname(ifp, device_get_name(dev), device_get_unit(dev));
< ifp->if_baudrate = IF_Gbps(10);
< ifp->if_init = ixgbe_init;
< ifp->if_softc = adapter;
< ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
< ifp->if_ioctl = ixgbe_ioctl;
< #if __FreeBSD_version >= 1100036
< if_setgetcounterfn(ifp, ixgbe_get_counter);
< #endif
< #if __FreeBSD_version >= 1100045
< /* TSO parameters */
< ifp->if_hw_tsomax = 65518;
< ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
< ifp->if_hw_tsomaxsegsize = 2048;
< #endif
< #ifndef IXGBE_LEGACY_TX
< ifp->if_transmit = ixgbe_mq_start;
< ifp->if_qflush = ixgbe_qflush;
< #else
< ifp->if_start = ixgbe_start;
< IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
< ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
< IFQ_SET_READY(&ifp->if_snd);
< #endif
---
> IXGBE_CORE_LOCK(adapter);
2851c2755
< ether_ifattach(ifp, adapter->hw.mac.addr);
---
> error = ixgbe_setup_low_power_mode(adapter);
2853,2854c2757
< adapter->max_frame_size =
< ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
---
> IXGBE_CORE_UNLOCK(adapter);
2856,2859c2759,2760
< /*
< * Tell the upper layer(s) we support long frames.
< */
< ifp->if_hdrlen = sizeof(struct ether_vlan_header);
---
> return (error);
> } /* ixgbe_suspend */
2861,2874c2762,2773
< /* Set capability flags */
< ifp->if_capabilities |= IFCAP_RXCSUM
< | IFCAP_TXCSUM
< | IFCAP_RXCSUM_IPV6
< | IFCAP_TXCSUM_IPV6
< | IFCAP_TSO4
< | IFCAP_TSO6
< | IFCAP_LRO
< | IFCAP_VLAN_HWTAGGING
< | IFCAP_VLAN_HWTSO
< | IFCAP_VLAN_HWCSUM
< | IFCAP_JUMBO_MTU
< | IFCAP_VLAN_MTU
< | IFCAP_HWSTATS;
---
> /************************************************************************
> * ixgbe_resume
> *
> * From D3 to D0
> ************************************************************************/
> static int
> ixgbe_resume(device_t dev)
> {
> struct adapter *adapter = device_get_softc(dev);
> struct ifnet *ifp = adapter->ifp;
> struct ixgbe_hw *hw = &adapter->hw;
> u32 wus;
2876,2877c2775
< /* Enable the above capabilities by default */
< ifp->if_capenable = ifp->if_capabilities;
---
> INIT_DEBUGOUT("ixgbe_resume: begin");
2879,2887c2777
< /*
< ** Don't turn this on by default: if vlans are
< ** created on another pseudo device (e.g. lagg)
< ** then vlan events are not passed thru, breaking
< ** operation; but with HW FILTER off it works. If
< ** using vlans directly on the ixgbe driver you can
< ** enable this and get full hardware tag filtering.
< */
< ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
---
> IXGBE_CORE_LOCK(adapter);
2888a2779,2787
> /* Read & clear WUS register */
> wus = IXGBE_READ_REG(hw, IXGBE_WUS);
> if (wus)
> device_printf(dev, "Woken up by (WUS): %#010x\n",
> IXGBE_READ_REG(hw, IXGBE_WUS));
> IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
> /* And clear WUFC until next low-power transition */
> IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
>
2890,2891c2789,2790
< * Specify the media types supported by this adapter and register
< * callbacks to update media and link information
---
> * Required after D3->D0 transition;
> * will re-advertise all previous advertised speeds
2893,2894c2792,2793
< ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
< ixgbe_media_status);
---
> if (ifp->if_flags & IFF_UP)
> ixgbe_init_locked(adapter);
2896,2897c2795
< adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
< ixgbe_add_media_types(adapter);
---
> IXGBE_CORE_UNLOCK(adapter);
2899,2901d2796
< /* Set autoselect media by default */
< ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
<
2903c2798
< }
---
> } /* ixgbe_resume */
2904a2800,2806
> /************************************************************************
> * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
> *
> * Takes the ifnet's if_capenable flags (e.g. set by the user using
> * ifconfig) and indicates to the OS via the ifnet's if_hwassist
> * field what mbuf offload flags the driver will understand.
> ************************************************************************/
2906c2808
< ixgbe_add_media_types(struct adapter *adapter)
---
> ixgbe_set_if_hwassist(struct adapter *adapter)
2908,2910c2810
< struct ixgbe_hw *hw = &adapter->hw;
< device_t dev = adapter->dev;
< int layer;
---
> struct ifnet *ifp = adapter->ifp;
2912,2929c2812,2821
< layer = adapter->phy_layer;
<
< /* Media types with matching FreeBSD media defines */
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
< if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
< if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
<
< if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
< layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
<
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
< if (hw->phy.multispeed_fiber)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
---
> ifp->if_hwassist = 0;
> #if __FreeBSD_version >= 1000000
> if (ifp->if_capenable & IFCAP_TSO4)
> ifp->if_hwassist |= CSUM_IP_TSO;
> if (ifp->if_capenable & IFCAP_TSO6)
> ifp->if_hwassist |= CSUM_IP6_TSO;
> if (ifp->if_capenable & IFCAP_TXCSUM) {
> ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
> if (adapter->hw.mac.type != ixgbe_mac_82598EB)
> ifp->if_hwassist |= CSUM_IP_SCTP;
2931,2946c2823,2827
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
< if (hw->phy.multispeed_fiber)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
< } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
<
< #ifdef IFM_ETH_XTYPE
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
< if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
---
> if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
> ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
> if (adapter->hw.mac.type != ixgbe_mac_82598EB)
> ifp->if_hwassist |= CSUM_IP6_SCTP;
> }
2948,2951c2829,2834
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
< device_printf(dev, "Media supported: 10GbaseKR\n");
< device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
---
> if (ifp->if_capenable & IFCAP_TSO)
> ifp->if_hwassist |= CSUM_TSO;
> if (ifp->if_capenable & IFCAP_TXCSUM) {
> ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
> if (adapter->hw.mac.type != ixgbe_mac_82598EB)
> ifp->if_hwassist |= CSUM_SCTP;
2953,2962d2835
< if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
< device_printf(dev, "Media supported: 10GbaseKX4\n");
< device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
< }
< if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
< device_printf(dev, "Media supported: 1000baseKX\n");
< device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
< }
2964,2972c2837
< if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
< device_printf(dev, "Media supported: 1000baseBX\n");
<
< if (hw->device_id == IXGBE_DEV_ID_82598AT) {
< ifmedia_add(&adapter->media,
< IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
< ifmedia_add(&adapter->media,
< IFM_ETHER | IFM_1000_T, 0, NULL);
< }
---
> } /* ixgbe_set_if_hwassist */
2974,2978c2839,2850
< ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
< }
<
< static void
< ixgbe_config_link(struct adapter *adapter)
---
> /************************************************************************
> * ixgbe_init_locked - Init entry point
> *
> * Used in two ways: It is used by the stack as an init
> * entry point in network interface structure. It is also
> * used by the driver as a hw/sw initialization routine to
> * get to a consistent state.
> *
> * return 0 on success, positive on failure
> ************************************************************************/
> void
> ixgbe_init_locked(struct adapter *adapter)
2979a2852,2853
> struct ifnet *ifp = adapter->ifp;
> device_t dev = adapter->dev;
2981,2982c2855,2860
< u32 autoneg, err = 0;
< bool sfp, negotiate;
---
> struct tx_ring *txr;
> struct rx_ring *rxr;
> u32 txdctl, mhadd;
> u32 rxdctl, rxctrl;
> u32 ctrl_ext;
> int err = 0;
2984c2862,2863
< sfp = ixgbe_is_sfp(hw);
---
> mtx_assert(&adapter->core_mtx, MA_OWNED);
> INIT_DEBUGOUT("ixgbe_init_locked: begin");
2986,3006c2865,2867
< if (sfp) {
< taskqueue_enqueue(adapter->tq, &adapter->mod_task);
< } else {
< if (hw->mac.ops.check_link)
< err = ixgbe_check_link(hw, &adapter->link_speed,
< &adapter->link_up, FALSE);
< if (err)
< goto out;
< autoneg = hw->phy.autoneg_advertised;
< if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
< err = hw->mac.ops.get_link_capabilities(hw,
< &autoneg, &negotiate);
< if (err)
< goto out;
< if (hw->mac.ops.setup_link)
< err = hw->mac.ops.setup_link(hw,
< autoneg, adapter->link_up);
< }
< out:
< return;
< }
---
> hw->adapter_stopped = FALSE;
> ixgbe_stop_adapter(hw);
> callout_stop(&adapter->timer);
3007a2869,2870
> /* Queue indices may change with IOV mode */
> ixgbe_align_all_queue_indices(adapter);
3009,3018c2872,2873
< /*********************************************************************
< *
< * Enable transmit units.
< *
< **********************************************************************/
< static void
< ixgbe_initialize_transmit_units(struct adapter *adapter)
< {
< struct tx_ring *txr = adapter->tx_rings;
< struct ixgbe_hw *hw = &adapter->hw;
---
> /* reprogram the RAR[0] in case user changed it. */
> ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3020,3024c2875,2878
< /* Setup the Base and Length of the Tx Descriptor Ring */
< for (int i = 0; i < adapter->num_queues; i++, txr++) {
< u64 tdba = txr->txdma.dma_paddr;
< u32 txctrl = 0;
< int j = txr->me;
---
> /* Get the latest mac address, User can use a LAA */
> bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
> ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
> hw->addr_ctrl.rar_used_count = 1;
3026,3030c2880,2881
< IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
< (tdba & 0x00000000ffffffffULL));
< IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
< IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
< adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
---
> /* Set hardware offload abilities from ifnet flags */
> ixgbe_set_if_hwassist(adapter);
3032,3034c2883,2888
< /* Setup the HW Tx Head and Tail descriptor pointers */
< IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
< IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
---
> /* Prepare transmit descriptors and buffers */
> if (ixgbe_setup_transmit_structures(adapter)) {
> device_printf(dev, "Could not setup transmit structures\n");
> ixgbe_stop(adapter);
> return;
> }
3036,3037c2890,2892
< /* Cache the tail address */
< txr->tail = IXGBE_TDT(j);
---
> ixgbe_init_hw(hw);
> ixgbe_initialize_iov(adapter);
> ixgbe_initialize_transmit_units(adapter);
3039,3061c2894,2895
< /* Disable Head Writeback */
< /*
< * Note: for X550 series devices, these registers are actually
< * prefixed with TPH_ isntead of DCA_, but the addresses and
< * fields remain the same.
< */
< switch (hw->mac.type) {
< case ixgbe_mac_82598EB:
< txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
< break;
< default:
< txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
< break;
< }
< txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
< switch (hw->mac.type) {
< case ixgbe_mac_82598EB:
< IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
< break;
< default:
< IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
< break;
< }
---
> /* Setup Multicast table */
> ixgbe_set_multi(adapter);
3063c2897,2901
< }
---
> /* Determine the correct mbuf pool, based on frame size */
> if (adapter->max_frame_size <= MCLBYTES)
> adapter->rx_mbuf_sz = MCLBYTES;
> else
> adapter->rx_mbuf_sz = MJUMPAGESIZE;
3065,3083c2903,2907
< if (hw->mac.type != ixgbe_mac_82598EB) {
< u32 dmatxctl, rttdcs;
< #ifdef PCI_IOV
< enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
< #endif
< dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
< dmatxctl |= IXGBE_DMATXCTL_TE;
< IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
< /* Disable arbiter to set MTQC */
< rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
< rttdcs |= IXGBE_RTTDCS_ARBDIS;
< IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
< #ifdef PCI_IOV
< IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
< #else
< IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
< #endif
< rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
< IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
---
> /* Prepare receive descriptors and buffers */
> if (ixgbe_setup_receive_structures(adapter)) {
> device_printf(dev, "Could not setup receive structures\n");
> ixgbe_stop(adapter);
> return;
3086,3087c2910,2911
< return;
< }
---
> /* Configure RX settings */
> ixgbe_initialize_receive_units(adapter);
3089,3100c2913,2914
< static void
< ixgbe_initialize_rss_mapping(struct adapter *adapter)
< {
< struct ixgbe_hw *hw = &adapter->hw;
< u32 reta = 0, mrqc, rss_key[10];
< int queue_id, table_size, index_mult;
< #ifdef RSS
< u32 rss_hash_config;
< #endif
< #ifdef PCI_IOV
< enum ixgbe_iov_mode mode;
< #endif
---
> /* Enable SDP & MSI-X interrupts based on adapter */
> ixgbe_config_gpie(adapter);
3102,3122c2916,2922
< #ifdef RSS
< /* Fetch the configured RSS key */
< rss_getkey((uint8_t *) &rss_key);
< #else
< /* set up random bits */
< arc4rand(&rss_key, sizeof(rss_key), 0);
< #endif
<
< /* Set multiplier for RETA setup and table size based on MAC */
< index_mult = 0x1;
< table_size = 128;
< switch (adapter->hw.mac.type) {
< case ixgbe_mac_82598EB:
< index_mult = 0x11;
< break;
< case ixgbe_mac_X550:
< case ixgbe_mac_X550EM_x:
< table_size = 512;
< break;
< default:
< break;
---
> /* Set MTU size */
> if (ifp->if_mtu > ETHERMTU) {
> /* aka IXGBE_MAXFRS on 82599 and newer */
> mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
> mhadd &= ~IXGBE_MHADD_MFS_MASK;
> mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
> IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3125,3128c2925,2931
< /* Set up the redirection table */
< for (int i = 0, j = 0; i < table_size; i++, j++) {
< if (j == adapter->num_queues) j = 0;
< #ifdef RSS
---
> /* Now enable all the queues */
> for (int i = 0; i < adapter->num_queues; i++) {
> txr = &adapter->tx_rings[i];
> txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
> txdctl |= IXGBE_TXDCTL_ENABLE;
> /* Set WTHRESH to 8, burst writeback */
> txdctl |= (8 << 16);
3130,3132c2933,2937
< * Fetch the RSS bucket id for the given indirection entry.
< * Cap it at the number of configured buckets (which is
< * num_queues.)
---
> * When the internal queue falls below PTHRESH (32),
> * start prefetching as long as there are at least
> * HTHRESH (1) buffers ready. The values are taken
> * from the Intel linux driver 3.8.21.
> * Prefetching enables tx line rate even with 1 queue.
3134,3147c2939,2960
< queue_id = rss_get_indirection_to_bucket(i);
< queue_id = queue_id % adapter->num_queues;
< #else
< queue_id = (j * index_mult);
< #endif
< /*
< * The low 8 bits are for hash value (n+0);
< * The next 8 bits are for hash value (n+1), etc.
< */
< reta = reta >> 8;
< reta = reta | ( ((uint32_t) queue_id) << 24);
< if ((i & 3) == 3) {
< if (i < 128)
< IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
---
> txdctl |= (32 << 0) | (1 << 8);
> IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
> }
>
> for (int i = 0, j = 0; i < adapter->num_queues; i++) {
> rxr = &adapter->rx_rings[i];
> rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
> if (hw->mac.type == ixgbe_mac_82598EB) {
> /*
> * PTHRESH = 21
> * HTHRESH = 4
> * WTHRESH = 8
> */
> rxdctl &= ~0x3FFFFF;
> rxdctl |= 0x080420;
> }
> rxdctl |= IXGBE_RXDCTL_ENABLE;
> IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
> for (; j < 10; j++) {
> if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
> IXGBE_RXDCTL_ENABLE)
> break;
3149,3150c2962
< IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
< reta = 0;
---
> msec_delay(1);
3152c2964
< }
---
> wmb();
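
The queue-enable loop above packs the prefetch thresholds into TXDCTL/RXDCTL: PTHRESH at bit 0, HTHRESH at bit 8 and WTHRESH at bit 16, as the shifts in the added code imply. A small sketch of that packing; the 7-bit field width is an assumption here, and decoding the 82598 RXDCTL value 0x080420 this way yields PTHRESH=32 rather than the 21 mentioned in the comment, so treat the literal value as authoritative.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: PTHRESH in bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16. */
static uint32_t pack_thresh(uint32_t pthresh, uint32_t hthresh, uint32_t wthresh)
{
	return (pthresh & 0x7F) | ((hthresh & 0x7F) << 8) | ((wthresh & 0x7F) << 16);
}

int main(void)
{
	/* TXDCTL as programmed above: PTHRESH=32, HTHRESH=1, WTHRESH=8 */
	printf("txdctl bits = 0x%06x\n", (unsigned)pack_thresh(32, 1, 8));

	/* Decode the 82598 RXDCTL value written above */
	uint32_t rxdctl = 0x080420;
	printf("rxdctl: PTHRESH=%u HTHRESH=%u WTHRESH=%u\n",
	    (unsigned)(rxdctl & 0x7F), (unsigned)((rxdctl >> 8) & 0x7F),
	    (unsigned)((rxdctl >> 16) & 0x7F));
	return 0;
}
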
3154,3275d2965
< /* Now fill our hash function seeds */
< for (int i = 0; i < 10; i++)
< IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
<
< /* Perform hash on these packet types */
< #ifdef RSS
< mrqc = IXGBE_MRQC_RSSEN;
< rss_hash_config = rss_gethashconfig();
< if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
< if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
< if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
< if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
< if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
< if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
< if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
< if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
< device_printf(adapter->dev,
< "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
< "but not supported\n", __func__);
< if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
< if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
< mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
< #else
< /*
< * Disable UDP - IP fragments aren't currently being handled
< * and so we end up with a mix of 2-tuple and 4-tuple
< * traffic.
< */
< mrqc = IXGBE_MRQC_RSSEN
< | IXGBE_MRQC_RSS_FIELD_IPV4
< | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
< | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
< | IXGBE_MRQC_RSS_FIELD_IPV6_EX
< | IXGBE_MRQC_RSS_FIELD_IPV6
< | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
< ;
< #endif /* RSS */
< #ifdef PCI_IOV
< mode = ixgbe_get_iov_mode(adapter);
< mrqc |= ixgbe_get_mrqc(mode);
< #endif
< IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
< }
<
<
< /*********************************************************************
< *
< * Setup receive registers and features.
< *
< **********************************************************************/
< #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
<
< #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
<
< static void
< ixgbe_initialize_receive_units(struct adapter *adapter)
< {
< struct rx_ring *rxr = adapter->rx_rings;
< struct ixgbe_hw *hw = &adapter->hw;
< struct ifnet *ifp = adapter->ifp;
< u32 bufsz, fctrl, srrctl, rxcsum;
< u32 hlreg;
<
< /*
< * Make sure receives are disabled while
< * setting up the descriptor ring
< */
< ixgbe_disable_rx(hw);
<
< /* Enable broadcasts */
< fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
< fctrl |= IXGBE_FCTRL_BAM;
< if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
< fctrl |= IXGBE_FCTRL_DPF;
< fctrl |= IXGBE_FCTRL_PMCF;
< }
< IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
<
< /* Set for Jumbo Frames? */
< hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
< if (ifp->if_mtu > ETHERMTU)
< hlreg |= IXGBE_HLREG0_JUMBOEN;
< else
< hlreg &= ~IXGBE_HLREG0_JUMBOEN;
< #ifdef DEV_NETMAP
< /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
< if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
< hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
< else
< hlreg |= IXGBE_HLREG0_RXCRCSTRP;
< #endif /* DEV_NETMAP */
< IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
<
< bufsz = (adapter->rx_mbuf_sz +
< BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
<
< for (int i = 0; i < adapter->num_queues; i++, rxr++) {
< u64 rdba = rxr->rxdma.dma_paddr;
< int j = rxr->me;
<
< /* Setup the Base and Length of the Rx Descriptor Ring */
< IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
< (rdba & 0x00000000ffffffffULL));
< IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
< IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
< adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
<
< /* Set up the SRRCTL register */
< srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
< srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
< srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
< srrctl |= bufsz;
< srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
<
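
The removed RSS code above fills the redirection table by packing four 8-bit queue indices into each 32-bit word and flushing every fourth entry to RETA (or ERETA beyond entry 127). A standalone sketch of that packing that prints the words instead of writing registers; index_mult mirrors the 0x1/0x11 multiplier chosen per MAC above.

#include <stdint.h>
#include <stdio.h>

/*
 * Fill a 'table_size'-entry redirection table for 'num_queues' queues,
 * packing four 8-bit queue ids per 32-bit word, low byte first: the same
 * reta >>= 8 / (queue_id << 24) accumulation used above.
 */
static void fill_reta(int table_size, int num_queues, int index_mult)
{
	uint32_t reta = 0;

	for (int i = 0, j = 0; i < table_size; i++, j++) {
		if (j == num_queues)
			j = 0;
		uint32_t queue_id = (uint32_t)(j * index_mult);

		reta >>= 8;
		reta |= queue_id << 24;
		if ((i & 3) == 3) {
			/* The driver writes this word to RETA, or ERETA past entry 127. */
			printf("reta[%3d..%3d] = 0x%08x\n", i - 3, i, (unsigned)reta);
			reta = 0;
		}
	}
}

int main(void)
{
	fill_reta(16, 4, 0x1);	/* a short table is enough to show the packing */
	return 0;
}
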
3277,3280c2967,2980
< * Set DROP_EN iff we have no flow control and >1 queue.
< * Note that srrctl was cleared shortly before during reset,
< * so we do not need to clear the bit, but do it just in case
< * this code is moved elsewhere.
---
> * In netmap mode, we must preserve the buffers made
> * available to userspace before the if_init()
> * (this is true by default on the TX side, because
> * init makes all buffers available to userspace).
> *
> * netmap_reset() and the device specific routines
> * (e.g. ixgbe_setup_receive_rings()) map these
> * buffers at the end of the NIC ring, so here we
> * must set the RDT (tail) register to make sure
> * they are not overwritten.
> *
> * In this driver the NIC ring starts at RDH = 0,
> * RDT points to the last slot available for reception (?),
> * so RDT = num_rx_desc - 1 means the whole ring is available.
3282,3287c2982,2987
< if (adapter->num_queues > 1 &&
< adapter->hw.fc.requested_mode == ixgbe_fc_none) {
< srrctl |= IXGBE_SRRCTL_DROP_EN;
< } else {
< srrctl &= ~IXGBE_SRRCTL_DROP_EN;
< }
---
> #ifdef DEV_NETMAP
> if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
> (ifp->if_capenable & IFCAP_NETMAP)) {
> struct netmap_adapter *na = NA(adapter->ifp);
> struct netmap_kring *kring = &na->rx_rings[i];
> int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3289,3296c2989,2993
< IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
<
< /* Setup the HW Rx Head and Tail Descriptor Pointers */
< IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
< IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
<
< /* Set the driver rx tail address */
< rxr->tail = IXGBE_RDT(rxr->me);
---
> IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
> } else
> #endif /* DEV_NETMAP */
> IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
> adapter->num_rx_desc - 1);
3299,3305c2996,3001
< if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
< u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
< IXGBE_PSRTYPE_UDPHDR |
< IXGBE_PSRTYPE_IPV4HDR |
< IXGBE_PSRTYPE_IPV6HDR;
< IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
< }
---
> /* Enable Receive engine */
> rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
> if (hw->mac.type == ixgbe_mac_82598EB)
> rxctrl |= IXGBE_RXCTRL_DMBYPS;
> rxctrl |= IXGBE_RXCTRL_RXEN;
> ixgbe_enable_rx_dma(hw, rxctrl);
3307c3003
< rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
---
> callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3309,3313c3005,3018
< ixgbe_initialize_rss_mapping(adapter);
<
< if (adapter->num_queues > 1) {
< /* RSS and RX IPP Checksum are mutually exclusive */
< rxcsum |= IXGBE_RXCSUM_PCSD;
---
> /* Set up MSI-X routing */
> if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
> ixgbe_configure_ivars(adapter);
> /* Set up auto-mask */
> if (hw->mac.type == ixgbe_mac_82598EB)
> IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
> else {
> IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
> IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
> }
> } else { /* Simple settings for Legacy/MSI */
> ixgbe_set_ivar(adapter, 0, 0, 0);
> ixgbe_set_ivar(adapter, 0, 0, 1);
> IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3316,3317c3021
< if (ifp->if_capenable & IFCAP_RXCSUM)
< rxcsum |= IXGBE_RXCSUM_PCSD;
---
> ixgbe_init_fdir(adapter);
3319,3392d3022
< /* This is useful for calculating UDP/IP fragment checksums */
< if (!(rxcsum & IXGBE_RXCSUM_PCSD))
< rxcsum |= IXGBE_RXCSUM_IPPCSE;
<
< IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
<
< return;
< }
<
<
< /*
< ** This routine is run via a vlan config EVENT,
< ** it enables us to use the HW Filter table since
< ** we can get the vlan id. This just creates the
< ** entry in the soft version of the VFTA, init will
< ** repopulate the real table.
< */
< static void
< ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
< {
< struct adapter *adapter = ifp->if_softc;
< u16 index, bit;
<
< if (ifp->if_softc != arg) /* Not our event */
< return;
<
< if ((vtag == 0) || (vtag > 4095)) /* Invalid */
< return;
<
< IXGBE_CORE_LOCK(adapter);
< index = (vtag >> 5) & 0x7F;
< bit = vtag & 0x1F;
< adapter->shadow_vfta[index] |= (1 << bit);
< ++adapter->num_vlans;
< ixgbe_setup_vlan_hw_support(adapter);
< IXGBE_CORE_UNLOCK(adapter);
< }
<
< /*
< ** This routine is run via a vlan
< ** unconfig EVENT; it removes our entry
< ** in the soft vfta.
< */
< static void
< ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
< {
< struct adapter *adapter = ifp->if_softc;
< u16 index, bit;
<
< if (ifp->if_softc != arg)
< return;
<
< if ((vtag == 0) || (vtag > 4095)) /* Invalid */
< return;
<
< IXGBE_CORE_LOCK(adapter);
< index = (vtag >> 5) & 0x7F;
< bit = vtag & 0x1F;
< adapter->shadow_vfta[index] &= ~(1 << bit);
< --adapter->num_vlans;
< /* Re-init to load the changes */
< ixgbe_setup_vlan_hw_support(adapter);
< IXGBE_CORE_UNLOCK(adapter);
< }
<
< static void
< ixgbe_setup_vlan_hw_support(struct adapter *adapter)
< {
< struct ifnet *ifp = adapter->ifp;
< struct ixgbe_hw *hw = &adapter->hw;
< struct rx_ring *rxr;
< u32 ctrl;
<
<
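
The removed VLAN event handlers above split the 12-bit tag into a word index (vtag >> 5) and a bit position (vtag & 0x1F) within the shadow VFTA. A standalone sketch of that bookkeeping; the 128-word table here is an ordinary array rather than the softc's shadow_vfta, with the size following from 4096 tags at 32 bits per word.

#include <stdint.h>
#include <stdio.h>

#define VFTA_SIZE 128	/* 128 x 32-bit words cover VLAN ids 0..4095 */

static uint32_t shadow_vfta[VFTA_SIZE];

/* Same index/bit split as the register/unregister handlers above. */
static void vfta_set(uint16_t vtag, int on)
{
	uint16_t index = (vtag >> 5) & 0x7F;
	uint16_t bit = vtag & 0x1F;

	if (vtag == 0 || vtag > 4095)	/* invalid tag, as checked above */
		return;
	if (on)
		shadow_vfta[index] |= (uint32_t)1 << bit;
	else
		shadow_vfta[index] &= ~((uint32_t)1 << bit);
}

int main(void)
{
	vfta_set(100, 1);
	printf("vlan 100 -> word %d, value 0x%08x\n",
	    100 >> 5, (unsigned)shadow_vfta[100 >> 5]);
	return 0;
}
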
3394,3409c3024,3032
< ** We get here thru init_locked, meaning
< ** a soft reset; this has already cleared
< ** the VFTA and other state, so if there
< ** have been no vlans registered, do nothing.
< */
< if (adapter->num_vlans == 0)
< return;
<
< /* Setup the queues for vlans */
< for (int i = 0; i < adapter->num_queues; i++) {
< rxr = &adapter->rx_rings[i];
< /* On 82599 the VLAN enable is per/queue in RXDCTL */
< if (hw->mac.type != ixgbe_mac_82598EB) {
< ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
< ctrl |= IXGBE_RXDCTL_VME;
< IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
---
> * Check on any SFP devices that
> * need to be kick-started
> */
> if (hw->phy.type == ixgbe_phy_none) {
> err = hw->phy.ops.identify(hw);
> if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
> device_printf(dev,
> "Unsupported SFP+ module type was detected.\n");
> return;
3411d3033
< rxr->vtag_strip = TRUE;
3414,3423c3036,3037
< if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
< return;
< /*
< ** A soft reset zeroes out the VFTA, so
< ** we need to repopulate it now.
< */
< for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
< if (adapter->shadow_vfta[i] != 0)
< IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
< adapter->shadow_vfta[i]);
---
> /* Set moderation on the Link interrupt */
> IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3425,3434c3039,3040
< ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
< /* Enable the Filter Table if enabled */
< if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
< ctrl &= ~IXGBE_VLNCTRL_CFIEN;
< ctrl |= IXGBE_VLNCTRL_VFE;
< }
< if (hw->mac.type == ixgbe_mac_82598EB)
< ctrl |= IXGBE_VLNCTRL_VME;
< IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
< }
---
> /* Config/Enable Link */
> ixgbe_config_link(adapter);
3436,3441c3042,3043
< static void
< ixgbe_enable_intr(struct adapter *adapter)
< {
< struct ixgbe_hw *hw = &adapter->hw;
< struct ix_queue *que = adapter->queues;
< u32 mask, fwsm;
---
> /* Hardware Packet Buffer & Flow Control setup */
> ixgbe_config_delay_values(adapter);
3443,3446c3045,3046
< mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
< /* Enable Fan Failure detection */
< if (hw->device_id == IXGBE_DEV_ID_82598AT)
< mask |= IXGBE_EIMS_GPI_SDP1;
---
> /* Initialize the FC settings */
> ixgbe_start_hw(hw);
3448,3491c3048,3049
< switch (adapter->hw.mac.type) {
< case ixgbe_mac_82599EB:
< mask |= IXGBE_EIMS_ECC;
< /* Temperature sensor on some adapters */
< mask |= IXGBE_EIMS_GPI_SDP0;
< /* SFP+ (RX_LOS_N & MOD_ABS_N) */
< mask |= IXGBE_EIMS_GPI_SDP1;
< mask |= IXGBE_EIMS_GPI_SDP2;
< #ifdef IXGBE_FDIR
< mask |= IXGBE_EIMS_FLOW_DIR;
< #endif
< #ifdef PCI_IOV
< mask |= IXGBE_EIMS_MAILBOX;
< #endif
< break;
< case ixgbe_mac_X540:
< /* Detect if Thermal Sensor is enabled */
< fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
< if (fwsm & IXGBE_FWSM_TS_ENABLED)
< mask |= IXGBE_EIMS_TS;
< mask |= IXGBE_EIMS_ECC;
< #ifdef IXGBE_FDIR
< mask |= IXGBE_EIMS_FLOW_DIR;
< #endif
< break;
< case ixgbe_mac_X550:
< case ixgbe_mac_X550EM_x:
< /* MAC thermal sensor is automatically enabled */
< mask |= IXGBE_EIMS_TS;
< /* Some devices use SDP0 for important information */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
< hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
< mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
< mask |= IXGBE_EIMS_ECC;
< #ifdef IXGBE_FDIR
< mask |= IXGBE_EIMS_FLOW_DIR;
< #endif
< #ifdef PCI_IOV
< mask |= IXGBE_EIMS_MAILBOX;
< #endif
< /* falls through */
< default:
< break;
< }
---
> /* Set up VLAN support and filter */
> ixgbe_setup_vlan_hw_support(adapter);
3493c3051,3052
< IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
---
> /* Setup DMA Coalescing */
> ixgbe_config_dmac(adapter);
3495,3504c3054,3061
< /* With MSI-X we use auto clear */
< if (adapter->msix_mem) {
< mask = IXGBE_EIMS_ENABLE_MASK;
< /* Don't autoclear Link */
< mask &= ~IXGBE_EIMS_OTHER;
< mask &= ~IXGBE_EIMS_LSC;
< #ifdef PCI_IOV
< mask &= ~IXGBE_EIMS_MAILBOX;
< #endif
< IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
---
> /* And now turn on interrupts */
> ixgbe_enable_intr(adapter);
>
> /* Enable the use of the MBX by the VF's */
> if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
> ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
> ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
> IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3507,3513c3064,3065
< /*
< ** Now enable all queues, this is done separately to
< ** allow for handling the extended (beyond 32) MSIX
< ** vectors that can be used by 82599
< */
< for (int i = 0; i < adapter->num_queues; i++, que++)
< ixgbe_enable_queue(adapter, que->msix);
---
> /* Now inform the stack we're ready */
> ifp->if_drv_flags |= IFF_DRV_RUNNING;
3515,3516d3066
< IXGBE_WRITE_FLUSH(hw);
<
3518c3068
< }
---
> } /* ixgbe_init_locked */
3519a3070,3072
> /************************************************************************
> * ixgbe_init
> ************************************************************************/
3521c3074
< ixgbe_disable_intr(struct adapter *adapter)
---
> ixgbe_init(void *arg)
3523,3534c3076
< if (adapter->msix_mem)
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
< if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
< } else {
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
< }
< IXGBE_WRITE_FLUSH(&adapter->hw);
< return;
< }
---
> struct adapter *adapter = arg;
3536,3547c3078,3080
< /*
< ** Get the width and transaction speed of
< ** the slot this adapter is plugged into.
< */
< static void
< ixgbe_get_slot_info(struct adapter *adapter)
< {
< device_t dev = adapter->dev;
< struct ixgbe_hw *hw = &adapter->hw;
< struct ixgbe_mac_info *mac = &hw->mac;
< u16 link;
< u32 offset;
---
> IXGBE_CORE_LOCK(adapter);
> ixgbe_init_locked(adapter);
> IXGBE_CORE_UNLOCK(adapter);
3549,3643d3081
< /* For most devices simply call the shared code routine */
< if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
< ixgbe_get_bus_info(hw);
< /* These devices don't use PCI-E */
< switch (hw->mac.type) {
< case ixgbe_mac_X550EM_x:
< return;
< default:
< goto display;
< }
< }
<
< /*
< ** For the Quad port adapter we need to parse back
< ** up the PCI tree to find the speed of the expansion
< ** slot into which this adapter is plugged. A bit more work.
< */
< dev = device_get_parent(device_get_parent(dev));
< #ifdef IXGBE_DEBUG
< device_printf(dev, "parent pcib = %x,%x,%x\n",
< pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
< #endif
< dev = device_get_parent(device_get_parent(dev));
< #ifdef IXGBE_DEBUG
< device_printf(dev, "slot pcib = %x,%x,%x\n",
< pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
< #endif
< /* Now get the PCI Express Capabilities offset */
< pci_find_cap(dev, PCIY_EXPRESS, &offset);
< /* ...and read the Link Status Register */
< link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
< switch (link & IXGBE_PCI_LINK_WIDTH) {
< case IXGBE_PCI_LINK_WIDTH_1:
< hw->bus.width = ixgbe_bus_width_pcie_x1;
< break;
< case IXGBE_PCI_LINK_WIDTH_2:
< hw->bus.width = ixgbe_bus_width_pcie_x2;
< break;
< case IXGBE_PCI_LINK_WIDTH_4:
< hw->bus.width = ixgbe_bus_width_pcie_x4;
< break;
< case IXGBE_PCI_LINK_WIDTH_8:
< hw->bus.width = ixgbe_bus_width_pcie_x8;
< break;
< default:
< hw->bus.width = ixgbe_bus_width_unknown;
< break;
< }
<
< switch (link & IXGBE_PCI_LINK_SPEED) {
< case IXGBE_PCI_LINK_SPEED_2500:
< hw->bus.speed = ixgbe_bus_speed_2500;
< break;
< case IXGBE_PCI_LINK_SPEED_5000:
< hw->bus.speed = ixgbe_bus_speed_5000;
< break;
< case IXGBE_PCI_LINK_SPEED_8000:
< hw->bus.speed = ixgbe_bus_speed_8000;
< break;
< default:
< hw->bus.speed = ixgbe_bus_speed_unknown;
< break;
< }
<
< mac->ops.set_lan_id(hw);
<
< display:
< device_printf(dev,"PCI Express Bus: Speed %s %s\n",
< ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
< (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
< (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
< (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
< (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
< (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
< ("Unknown"));
<
< if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
< ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
< (hw->bus.speed == ixgbe_bus_speed_2500))) {
< device_printf(dev, "PCI-Express bandwidth available"
< " for this card\n is not sufficient for"
< " optimal performance.\n");
< device_printf(dev, "For optimal performance a x8 "
< "PCIE, or x4 PCIE Gen2 slot is required.\n");
< }
< if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
< ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
< (hw->bus.speed < ixgbe_bus_speed_8000))) {
< device_printf(dev, "PCI-Express bandwidth available"
< " for this card\n is not sufficient for"
< " optimal performance.\n");
< device_printf(dev, "For optimal performance a x8 "
< "PCIE Gen3 slot is required.\n");
< }
<
3645c3083
< }
---
> } /* ixgbe_init */
3647,3654c3085,3093
<
< /*
< ** Setup the correct IVAR register for a particular MSIX interrupt
< ** (yes this is all very magic and confusing :)
< ** - entry is the register array entry
< ** - vector is the MSIX vector for this queue
< ** - type is RX/TX/MISC
< */
---
> /************************************************************************
> * ixgbe_set_ivar
> *
> * Setup the correct IVAR register for a particular MSI-X interrupt
> * (yes this is all very magic and confusing :)
> * - entry is the register array entry
> * - vector is the MSI-X vector for this queue
> * - type is RX/TX/MISC
> ************************************************************************/
3680a3120
> case ixgbe_mac_X550EM_a:
3687c3127
< } else { /* RX/TX IVARS */
---
> } else { /* RX/TX IVARS */
3698c3138
< }
---
> } /* ixgbe_set_ivar */
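/*
 * Illustration: for the non-82598 MACs each 32-bit IVAR register carries
 * four 8-bit vector fields covering two queues (an RX and a TX lane per
 * queue), so (entry >> 1) is assumed to pick the register while the low
 * bit of entry plus the type pick the byte lane.  The standalone sketch
 * below mirrors that field math under those assumptions; ivar_set and
 * IVAR_ALLOC_VAL are illustrative names, not the driver's accessors.
 */
#include <stdint.h>
#include <stdio.h>

#define IVAR_ALLOC_VAL 0x80u                   /* "vector valid" bit in each field */

static uint32_t
ivar_set(uint32_t ivar, uint8_t entry, uint8_t vector, uint8_t type)
{
	uint32_t index = (16 * (entry & 1)) + (8 * type);   /* byte lane */

	ivar &= ~((uint32_t)0xFF << index);
	ivar |= ((uint32_t)vector | IVAR_ALLOC_VAL) << index;
	return (ivar);
}

int
main(void)
{
	/* Map RX (type 0) of queue 3 to MSI-X vector 5; register is IVAR(3 >> 1). */
	printf("IVAR[1] = 0x%08x\n", (unsigned)ivar_set(0, 3, 5, 0));
	return (0);
}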
3699a3140,3142
> /************************************************************************
> * ixgbe_configure_ivars
> ************************************************************************/
3703,3704c3146,3147
< struct ix_queue *que = adapter->queues;
< u32 newitr;
---
> struct ix_queue *que = adapter->queues;
> u32 newitr;
3710,3712c3153,3155
< ** Disable DMA coalescing if interrupt moderation is
< ** disabled.
< */
---
> * Disable DMA coalescing if interrupt moderation is
> * disabled.
> */
3717c3160
< for (int i = 0; i < adapter->num_queues; i++, que++) {
---
> for (int i = 0; i < adapter->num_queues; i++, que++) {
3721c3164
< ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
---
> ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3725,3726c3168
< IXGBE_WRITE_REG(&adapter->hw,
< IXGBE_EITR(que->msix), newitr);
---
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3730,3731c3172,3173
< ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
< }
---
> ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
> } /* ixgbe_configure_ivars */
3733,3736c3175,3429
< /*
< ** ixgbe_sfp_probe - called in the local timer to
< ** determine if a port had optics inserted.
< */
---
> /************************************************************************
> * ixgbe_config_gpie
> ************************************************************************/
> static void
> ixgbe_config_gpie(struct adapter *adapter)
> {
> struct ixgbe_hw *hw = &adapter->hw;
> u32 gpie;
>
> gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
>
> if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
> /* Enable Enhanced MSI-X mode */
> gpie |= IXGBE_GPIE_MSIX_MODE
> | IXGBE_GPIE_EIAME
> | IXGBE_GPIE_PBA_SUPPORT
> | IXGBE_GPIE_OCD;
> }
>
> /* Fan Failure Interrupt */
> if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
> gpie |= IXGBE_SDP1_GPIEN;
>
> /* Thermal Sensor Interrupt */
> if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
> gpie |= IXGBE_SDP0_GPIEN_X540;
>
> /* Link detection */
> switch (hw->mac.type) {
> case ixgbe_mac_82599EB:
> gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
> break;
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> gpie |= IXGBE_SDP0_GPIEN_X540;
> break;
> default:
> break;
> }
>
> IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
>
> return;
> } /* ixgbe_config_gpie */
>
> /************************************************************************
> * ixgbe_config_delay_values
> *
> * Requires adapter->max_frame_size to be set.
> ************************************************************************/
> static void
> ixgbe_config_delay_values(struct adapter *adapter)
> {
> struct ixgbe_hw *hw = &adapter->hw;
> u32 rxpb, frame, size, tmp;
>
> frame = adapter->max_frame_size;
>
> /* Calculate High Water */
> switch (hw->mac.type) {
> case ixgbe_mac_X540:
> case ixgbe_mac_X550:
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> tmp = IXGBE_DV_X540(frame, frame);
> break;
> default:
> tmp = IXGBE_DV(frame, frame);
> break;
> }
> size = IXGBE_BT2KB(tmp);
> rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
> hw->fc.high_water[0] = rxpb - size;
>
> /* Now calculate Low Water */
> switch (hw->mac.type) {
> case ixgbe_mac_X540:
> case ixgbe_mac_X550:
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> tmp = IXGBE_LOW_DV_X540(frame);
> break;
> default:
> tmp = IXGBE_LOW_DV(frame);
> break;
> }
> hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
>
> hw->fc.pause_time = IXGBE_FC_PAUSE;
> hw->fc.send_xon = TRUE;
> } /* ixgbe_config_delay_values */
>
> /************************************************************************
> * ixgbe_set_multi - Multicast Update
> *
> * Called whenever multicast address list is updated.
> ************************************************************************/
> static void
> ixgbe_set_multi(struct adapter *adapter)
> {
> struct ifmultiaddr *ifma;
> struct ixgbe_mc_addr *mta;
> struct ifnet *ifp = adapter->ifp;
> u8 *update_ptr;
> int mcnt = 0;
> u32 fctrl;
>
> IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
>
> mta = adapter->mta;
> bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
>
> #if __FreeBSD_version < 800000
> IF_ADDR_LOCK(ifp);
> #else
> if_maddr_rlock(ifp);
> #endif
> TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
> if (ifma->ifma_addr->sa_family != AF_LINK)
> continue;
> if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
> break;
> bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
> mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
> mta[mcnt].vmdq = adapter->pool;
> mcnt++;
> }
> #if __FreeBSD_version < 800000
> IF_ADDR_UNLOCK(ifp);
> #else
> if_maddr_runlock(ifp);
> #endif
>
> fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
> fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
> if (ifp->if_flags & IFF_PROMISC)
> fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
> else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
> ifp->if_flags & IFF_ALLMULTI) {
> fctrl |= IXGBE_FCTRL_MPE;
> fctrl &= ~IXGBE_FCTRL_UPE;
> } else
> fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
>
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
>
> if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
> update_ptr = (u8 *)mta;
> ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
> ixgbe_mc_array_itr, TRUE);
> }
>
> return;
> } /* ixgbe_set_multi */
>
> /************************************************************************
> * ixgbe_mc_array_itr
> *
> * An iterator function needed by the multicast shared code.
> * It feeds the shared code routine the addresses in the
> * array of ixgbe_set_multi() one by one.
> ************************************************************************/
> static u8 *
> ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
> {
> struct ixgbe_mc_addr *mta;
>
> mta = (struct ixgbe_mc_addr *)*update_ptr;
> *vmdq = mta->vmdq;
>
> *update_ptr = (u8*)(mta + 1);
>
> return (mta->addr);
> } /* ixgbe_mc_array_itr */
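/*
 * Illustration: ixgbe_mc_array_itr() above is a cursor over the mta array;
 * each call returns one MAC address and advances *update_ptr by one entry.
 * A standalone sketch of that consumption pattern follows; struct mc_addr
 * and the driving loop are illustrative stand-ins, not the shared code.
 */
#include <stdint.h>
#include <stdio.h>

struct mc_addr {
	uint8_t  addr[6];
	uint32_t vmdq;
};

static uint8_t *
mc_array_itr(uint8_t **update_ptr, uint32_t *vmdq)
{
	struct mc_addr *mta = (struct mc_addr *)*update_ptr;

	*vmdq = mta->vmdq;
	*update_ptr = (uint8_t *)(mta + 1);    /* step to the next entry */
	return (mta->addr);
}

int
main(void)
{
	struct mc_addr list[2] = {
		{ { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, 0 },
		{ { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }, 0 },
	};
	uint8_t *cursor = (uint8_t *)list;
	uint32_t vmdq;

	for (int i = 0; i < 2; i++) {
		uint8_t *mac = mc_array_itr(&cursor, &vmdq);
		printf("entry %d: %02x:%02x:%02x:...\n", i, mac[0], mac[1], mac[2]);
	}
	return (0);
}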
>
> /************************************************************************
> * ixgbe_local_timer - Timer routine
> *
> * Checks for link status, updates statistics,
> * and runs the watchdog check.
> ************************************************************************/
> static void
> ixgbe_local_timer(void *arg)
> {
> struct adapter *adapter = arg;
> device_t dev = adapter->dev;
> struct ix_queue *que = adapter->queues;
> u64 queues = 0;
> int hung = 0;
>
> mtx_assert(&adapter->core_mtx, MA_OWNED);
>
> /* Check for pluggable optics */
> if (adapter->sfp_probe)
> if (!ixgbe_sfp_probe(adapter))
> goto out; /* Nothing to do */
>
> ixgbe_update_link_status(adapter);
> ixgbe_update_stats_counters(adapter);
>
> /*
> * Check the TX queues status
> * - mark hung queues so we don't schedule on them
> * - watchdog only if all queues show hung
> */
> for (int i = 0; i < adapter->num_queues; i++, que++) {
> /* Keep track of queues with work for soft irq */
> if (que->txr->busy)
> queues |= ((u64)1 << que->me);
> /*
> * Each time txeof runs without cleaning while there
> * are uncleaned descriptors, it increments busy. If
> * we get to the MAX we declare it hung.
> */
> if (que->busy == IXGBE_QUEUE_HUNG) {
> ++hung;
> /* Mark the queue as inactive */
> adapter->active_queues &= ~((u64)1 << que->me);
> continue;
> } else {
> /* Check if we've come back from hung */
> if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
> adapter->active_queues |= ((u64)1 << que->me);
> }
> if (que->busy >= IXGBE_MAX_TX_BUSY) {
> device_printf(dev,
> "Warning queue %d appears to be hung!\n", i);
> que->txr->busy = IXGBE_QUEUE_HUNG;
> ++hung;
> }
> }
>
> /* Only truly watchdog if all queues show hung */
> if (hung == adapter->num_queues)
> goto watchdog;
> else if (queues != 0) { /* Force an IRQ on queues with work */
> ixgbe_rearm_queues(adapter, queues);
> }
>
> out:
> callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
> return;
>
> watchdog:
> device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
> adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
> adapter->watchdog_events++;
> ixgbe_init_locked(adapter);
> } /* ixgbe_local_timer */
>
> /************************************************************************
> * ixgbe_sfp_probe
> *
> * Determine if a port had optics inserted.
> ************************************************************************/
3740,3742c3433,3435
< struct ixgbe_hw *hw = &adapter->hw;
< device_t dev = adapter->dev;
< bool result = FALSE;
---
> struct ixgbe_hw *hw = &adapter->hw;
> device_t dev = adapter->dev;
> bool result = FALSE;
3748c3441
< goto out;
---
> goto out;
3749a3443
> adapter->sfp_probe = FALSE;
3752,3754c3446,3448
< device_printf(dev, "Reload driver with supported module.\n");
< adapter->sfp_probe = FALSE;
< goto out;
---
> device_printf(dev,
> "Reload driver with supported module.\n");
> goto out;
3758,3760d3451
< adapter->sfp_probe = FALSE;
< /* Set the optics type so system reports correctly */
< ixgbe_setup_optics(adapter);
3763a3455
>
3765c3457
< }
---
> } /* ixgbe_sfp_probe */
3767,3770c3459,3461
< /*
< ** Tasklet handler for MSIX Link interrupts
< ** - do outside interrupt since it might sleep
< */
---
> /************************************************************************
> * ixgbe_handle_mod - Tasklet for SFP module interrupts
> ************************************************************************/
3772,3788d3462
< ixgbe_handle_link(void *context, int pending)
< {
< struct adapter *adapter = context;
< struct ixgbe_hw *hw = &adapter->hw;
<
< ixgbe_check_link(hw,
< &adapter->link_speed, &adapter->link_up, 0);
< ixgbe_update_link_status(adapter);
<
< /* Re-enable link interrupts */
< IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
< }
<
< /*
< ** Tasklet for handling SFP module interrupts
< */
< static void
3793,3795c3467,3468
< enum ixgbe_phy_type orig_type = hw->phy.type;
< device_t dev = adapter->dev;
< u32 err;
---
> device_t dev = adapter->dev;
> u32 err, cage_full = 0;
3797,3810c3470,3482
< IXGBE_CORE_LOCK(adapter);
<
< /* Check to see if the PHY type changed */
< if (hw->phy.ops.identify) {
< hw->phy.type = ixgbe_phy_unknown;
< hw->phy.ops.identify(hw);
< }
<
< if (hw->phy.type != orig_type) {
< device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
<
< if (hw->phy.type == ixgbe_phy_none) {
< hw->phy.sfp_type = ixgbe_sfp_type_unknown;
< goto out;
---
> if (adapter->hw.need_crosstalk_fix) {
> switch (hw->mac.type) {
> case ixgbe_mac_82599EB:
> cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
> IXGBE_ESDP_SDP2;
> break;
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
> IXGBE_ESDP_SDP0;
> break;
> default:
> break;
3813,3817c3485,3486
< /* Try to do the initialization that was skipped before */
< if (hw->phy.ops.init)
< hw->phy.ops.init(hw);
< if (hw->phy.ops.reset)
< hw->phy.ops.reset(hw);
---
> if (!cage_full)
> return;
3824c3493
< goto out;
---
> return;
3831c3500
< goto out;
---
> return;
3833,3850c3502,3503
< if (hw->phy.multispeed_fiber)
< taskqueue_enqueue(adapter->tq, &adapter->msf_task);
< out:
< /* Update media type */
< switch (hw->mac.ops.get_media_type(hw)) {
< case ixgbe_media_type_fiber:
< adapter->optics = IFM_10G_SR;
< break;
< case ixgbe_media_type_copper:
< adapter->optics = IFM_10G_TWINAX;
< break;
< case ixgbe_media_type_cx4:
< adapter->optics = IFM_10G_CX4;
< break;
< default:
< adapter->optics = 0;
< break;
< }
---
> taskqueue_enqueue(adapter->tq, &adapter->msf_task);
> } /* ixgbe_handle_mod */
3852,3854d3504
< IXGBE_CORE_UNLOCK(adapter);
< return;
< }
3856,3859c3506,3508
<
< /*
< ** Tasklet for handling MSF (multispeed fiber) interrupts
< */
---
> /************************************************************************
> * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
> ************************************************************************/
3865,3866c3514,3515
< u32 autoneg;
< bool negotiate;
---
> u32 autoneg;
> bool negotiate;
3868d3516
< IXGBE_CORE_LOCK(adapter);
3882,3884c3530
< IXGBE_CORE_UNLOCK(adapter);
< return;
< }
---
> } /* ixgbe_handle_msf */
3886,3888c3532,3534
< /*
< ** Tasklet for handling interrupts from an external PHY
< */
---
> /************************************************************************
> * ixgbe_handle_phy - Tasklet for external PHY interrupts
> ************************************************************************/
3894c3540
< int error;
---
> int error;
3898,3900c3544
< device_printf(adapter->dev,
< "CRITICAL: EXTERNAL PHY OVER TEMP!! "
< " PHY will downshift to lower power state!\n");
---
> device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
3903,3906c3547,3548
< "Error handling LASI interrupt: %d\n",
< error);
< return;
< }
---
> "Error handling LASI interrupt: %d\n", error);
> } /* ixgbe_handle_phy */
3908,3911c3550,3555
< #ifdef IXGBE_FDIR
< /*
< ** Tasklet for reinitializing the Flow Director filter table
< */
---
> /************************************************************************
> * ixgbe_stop - Stop the hardware
> *
> * Disables all traffic on the adapter by issuing a
> * global reset on the MAC and deallocates TX/RX buffers.
> ************************************************************************/
3913c3557
< ixgbe_reinit_fdir(void *context, int pending)
---
> ixgbe_stop(void *arg)
3915,3916c3559,3561
< struct adapter *adapter = context;
< struct ifnet *ifp = adapter->ifp;
---
> struct ifnet *ifp;
> struct adapter *adapter = arg;
> struct ixgbe_hw *hw = &adapter->hw;
3918,3925c3563,3588
< if (adapter->fdir_reinit != 1) /* Shouldn't happen */
< return;
< ixgbe_reinit_fdir_tables_82599(&adapter->hw);
< adapter->fdir_reinit = 0;
< /* re-enable flow director interrupts */
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
< /* Restart the interface */
< ifp->if_drv_flags |= IFF_DRV_RUNNING;
---
> ifp = adapter->ifp;
>
> mtx_assert(&adapter->core_mtx, MA_OWNED);
>
> INIT_DEBUGOUT("ixgbe_stop: begin\n");
> ixgbe_disable_intr(adapter);
> callout_stop(&adapter->timer);
>
> /* Let the stack know...*/
> ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
>
> ixgbe_reset_hw(hw);
> hw->adapter_stopped = FALSE;
> ixgbe_stop_adapter(hw);
> if (hw->mac.type == ixgbe_mac_82599EB)
> ixgbe_stop_mac_link_on_d3_82599(hw);
> /* Turn off the laser - noop with no optics */
> ixgbe_disable_tx_laser(hw);
>
> /* Update the stack */
> adapter->link_up = FALSE;
> ixgbe_update_link_status(adapter);
>
> /* reprogram the RAR[0] in case user changed it. */
> ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
>
3927,3928c3590
< }
< #endif
---
> } /* ixgbe_stop */
3930c3592,3593
< /*********************************************************************
---
> /************************************************************************
> * ixgbe_update_link_status - Update OS on link state
3932,3934c3595,3598
< * Configure DMA Coalescing
< *
< **********************************************************************/
---
> * Note: Only updates the OS on the cached link state.
> * The real check of the hardware only happens with
> * a link interrupt.
> ************************************************************************/
3935a3600,3637
> ixgbe_update_link_status(struct adapter *adapter)
> {
> struct ifnet *ifp = adapter->ifp;
> device_t dev = adapter->dev;
>
> if (adapter->link_up) {
> if (adapter->link_active == FALSE) {
> if (bootverbose)
> device_printf(dev, "Link is up %d Gbps %s \n",
> ((adapter->link_speed == 128) ? 10 : 1),
> "Full Duplex");
> adapter->link_active = TRUE;
> /* Update any Flow Control changes */
> ixgbe_fc_enable(&adapter->hw);
> /* Update DMA coalescing config */
> ixgbe_config_dmac(adapter);
> if_link_state_change(ifp, LINK_STATE_UP);
> if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
> ixgbe_ping_all_vfs(adapter);
> }
> } else { /* Link down */
> if (adapter->link_active == TRUE) {
> if (bootverbose)
> device_printf(dev, "Link is Down\n");
> if_link_state_change(ifp, LINK_STATE_DOWN);
> adapter->link_active = FALSE;
> if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
> ixgbe_ping_all_vfs(adapter);
> }
> }
>
> return;
> } /* ixgbe_update_link_status */
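/*
 * Illustration: the "Link is up %d Gbps" message above keys off the raw
 * link_speed word, where 128 (0x80) is assumed to be the shared code's
 * 10 Gb/s full-duplex flag and anything else is reported as 1 Gbps.  A
 * standalone sketch of a more explicit decode; the flag values below are
 * assumptions mirrored here for illustration only.
 */
#include <stdio.h>

#define LINK_SPEED_100_FULL   0x0008
#define LINK_SPEED_1GB_FULL   0x0020
#define LINK_SPEED_10GB_FULL  0x0080           /* == 128, the value tested above */

static const char *
link_speed_str(unsigned speed)
{
	switch (speed) {
	case LINK_SPEED_10GB_FULL: return ("10 Gbps");
	case LINK_SPEED_1GB_FULL:  return ("1 Gbps");
	case LINK_SPEED_100_FULL:  return ("100 Mbps");
	default:                   return ("unknown");
	}
}

int
main(void)
{
	printf("Link is up %s Full Duplex\n", link_speed_str(128));
	return (0);
}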
>
> /************************************************************************
> * ixgbe_config_dmac - Configure DMA Coalescing
> ************************************************************************/
> static void
3938c3640
< struct ixgbe_hw *hw = &adapter->hw;
---
> struct ixgbe_hw *hw = &adapter->hw;
3941,3942c3643
< if (hw->mac.type < ixgbe_mac_X550 ||
< !hw->mac.ops.dmac_config)
---
> if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3951c3652
<
---
>
3957c3658
< }
---
> } /* ixgbe_config_dmac */
3959,3965c3660,3662
< /*
< * Checks whether the adapter's ports are capable of
< * Wake On LAN by reading the adapter's NVM.
< *
< * Sets each port's hw->wol_enabled value depending
< * on the value read here.
< */
---
> /************************************************************************
> * ixgbe_enable_intr
> ************************************************************************/
3967c3664
< ixgbe_check_wol_support(struct adapter *adapter)
---
> ixgbe_enable_intr(struct adapter *adapter)
3970c3667,3668
< u16 dev_caps = 0;
---
> struct ix_queue *que = adapter->queues;
> u32 mask, fwsm;
3972,3978c3670
< /* Find out WoL support for port */
< adapter->wol_support = hw->wol_enabled = 0;
< ixgbe_get_device_caps(hw, &dev_caps);
< if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
< ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
< hw->bus.func == 0))
< adapter->wol_support = hw->wol_enabled = 1;
---
> mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3980,4034c3672,3706
< /* Save initial wake up filter configuration */
< adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
<
< return;
< }
<
< /*
< * Prepare the adapter/port for LPLU and/or WoL
< */
< static int
< ixgbe_setup_low_power_mode(struct adapter *adapter)
< {
< struct ixgbe_hw *hw = &adapter->hw;
< device_t dev = adapter->dev;
< s32 error = 0;
<
< mtx_assert(&adapter->core_mtx, MA_OWNED);
<
< if (!hw->wol_enabled)
< ixgbe_set_phy_power(hw, FALSE);
<
< /* Limit power management flow to X550EM baseT */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
< && hw->phy.ops.enter_lplu) {
< /* Turn off support for APM wakeup. (Using ACPI instead) */
< IXGBE_WRITE_REG(hw, IXGBE_GRC,
< IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
<
< /*
< * Clear Wake Up Status register to prevent any previous wakeup
< * events from waking us up immediately after we suspend.
< */
< IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
<
< /*
< * Program the Wakeup Filter Control register with user filter
< * settings
< */
< IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
<
< /* Enable wakeups and power management in Wakeup Control */
< IXGBE_WRITE_REG(hw, IXGBE_WUC,
< IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
<
< /* X550EM baseT adapters need a special LPLU flow */
< hw->phy.reset_disable = true;
< ixgbe_stop(adapter);
< error = hw->phy.ops.enter_lplu(hw);
< if (error)
< device_printf(dev,
< "Error entering LPLU: %d\n", error);
< hw->phy.reset_disable = false;
< } else {
< /* Just stop for other adapters */
< ixgbe_stop(adapter);
---
> switch (adapter->hw.mac.type) {
> case ixgbe_mac_82599EB:
> mask |= IXGBE_EIMS_ECC;
> /* Temperature sensor on some adapters */
> mask |= IXGBE_EIMS_GPI_SDP0;
> /* SFP+ (RX_LOS_N & MOD_ABS_N) */
> mask |= IXGBE_EIMS_GPI_SDP1;
> mask |= IXGBE_EIMS_GPI_SDP2;
> break;
> case ixgbe_mac_X540:
> /* Detect if Thermal Sensor is enabled */
> fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
> if (fwsm & IXGBE_FWSM_TS_ENABLED)
> mask |= IXGBE_EIMS_TS;
> mask |= IXGBE_EIMS_ECC;
> break;
> case ixgbe_mac_X550:
> /* MAC thermal sensor is automatically enabled */
> mask |= IXGBE_EIMS_TS;
> mask |= IXGBE_EIMS_ECC;
> break;
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> /* Some devices use SDP0 for important information */
> if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
> hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
> hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
> hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
> mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
> if (hw->phy.type == ixgbe_phy_x550em_ext_t)
> mask |= IXGBE_EICR_GPI_SDP0_X540;
> mask |= IXGBE_EIMS_ECC;
> break;
> default:
> break;
4037,4038c3709,3717
< return error;
< }
---
> /* Enable Fan Failure detection */
> if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
> mask |= IXGBE_EIMS_GPI_SDP1;
> /* Enable SR-IOV */
> if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
> mask |= IXGBE_EIMS_MAILBOX;
> /* Enable Flow Director */
> if (adapter->feat_en & IXGBE_FEATURE_FDIR)
> mask |= IXGBE_EIMS_FLOW_DIR;
4040,4050c3719
< /**********************************************************************
< *
< * Update the board statistics counters.
< *
< **********************************************************************/
< static void
< ixgbe_update_stats_counters(struct adapter *adapter)
< {
< struct ixgbe_hw *hw = &adapter->hw;
< u32 missed_rx = 0, bprc, lxon, lxoff, total;
< u64 total_missed_rx = 0;
---
> IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4052,4060c3721,3729
< adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
< adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
< adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
< adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
<
< for (int i = 0; i < 16; i++) {
< adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
< adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
< adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
---
> /* With MSI-X we use auto clear */
> if (adapter->msix_mem) {
> mask = IXGBE_EIMS_ENABLE_MASK;
> /* Don't autoclear Link */
> mask &= ~IXGBE_EIMS_OTHER;
> mask &= ~IXGBE_EIMS_LSC;
> if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
> mask &= ~IXGBE_EIMS_MAILBOX;
> IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4062,4064d3730
< adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
< adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
< adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4066,4087d3731
< /* Hardware workaround, gprc counts missed packets */
< adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
< adapter->stats.pf.gprc -= missed_rx;
<
< if (hw->mac.type != ixgbe_mac_82598EB) {
< adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
< ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
< adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
< ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
< adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
< ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
< adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
< adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
< } else {
< adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
< adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
< /* 82598 only has a counter in the high register */
< adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
< adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
< adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
< }
<
4089,4090c3733,3735
< * Workaround: mprc hardware is incorrectly counting
< * broadcasts, so for now we subtract those.
---
> * Now enable all queues, this is done separately to
> * allow for handling the extended (beyond 32) MSI-X
> * vectors that can be used by 82599
4092,4096c3737,3738
< bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
< adapter->stats.pf.bprc += bprc;
< adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
< if (hw->mac.type == ixgbe_mac_82598EB)
< adapter->stats.pf.mprc -= bprc;
---
> for (int i = 0; i < adapter->num_queues; i++, que++)
> ixgbe_enable_queue(adapter, que->msix);
4098,4103c3740
< adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
< adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
< adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
< adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
< adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
< adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
---
> IXGBE_WRITE_FLUSH(hw);
4105,4109c3742,3743
< lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
< adapter->stats.pf.lxontxc += lxon;
< lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
< adapter->stats.pf.lxofftxc += lxoff;
< total = lxon + lxoff;
---
> return;
> } /* ixgbe_enable_intr */
4111,4161c3745,3749
< adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
< adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
< adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
< adapter->stats.pf.gptc -= total;
< adapter->stats.pf.mptc -= total;
< adapter->stats.pf.ptc64 -= total;
< adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
<
< adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
< adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
< adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
< adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
< adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
< adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
< adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
< adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
< adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
< adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
< adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
< adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
< adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
< adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
< adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
< adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
< adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
< adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
< /* Only read FCOE on 82599 */
< if (hw->mac.type != ixgbe_mac_82598EB) {
< adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
< adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
< adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
< adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
< adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
< }
<
< /* Fill out the OS statistics structure */
< IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
< IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
< IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
< IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
< IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
< IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
< IXGBE_SET_COLLISIONS(adapter, 0);
< IXGBE_SET_IQDROPS(adapter, total_missed_rx);
< IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
< + adapter->stats.pf.rlec);
< }
<
< #if __FreeBSD_version >= 1100036
< static uint64_t
< ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
---
> /************************************************************************
> * ixgbe_disable_intr
> ************************************************************************/
> static void
> ixgbe_disable_intr(struct adapter *adapter)
4163,4195c3751,3758
< struct adapter *adapter;
< struct tx_ring *txr;
< uint64_t rv;
<
< adapter = if_getsoftc(ifp);
<
< switch (cnt) {
< case IFCOUNTER_IPACKETS:
< return (adapter->ipackets);
< case IFCOUNTER_OPACKETS:
< return (adapter->opackets);
< case IFCOUNTER_IBYTES:
< return (adapter->ibytes);
< case IFCOUNTER_OBYTES:
< return (adapter->obytes);
< case IFCOUNTER_IMCASTS:
< return (adapter->imcasts);
< case IFCOUNTER_OMCASTS:
< return (adapter->omcasts);
< case IFCOUNTER_COLLISIONS:
< return (0);
< case IFCOUNTER_IQDROPS:
< return (adapter->iqdrops);
< case IFCOUNTER_OQDROPS:
< rv = 0;
< txr = adapter->tx_rings;
< for (int i = 0; i < adapter->num_queues; i++, txr++)
< rv += txr->br->br_drops;
< return (rv);
< case IFCOUNTER_IERRORS:
< return (adapter->ierrors);
< default:
< return (if_get_counter_default(ifp, cnt));
---
> if (adapter->msix_mem)
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
> if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
> } else {
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
4197,4198c3760
< }
< #endif
---
> IXGBE_WRITE_FLUSH(&adapter->hw);
4200,4206c3762,3763
< /** ixgbe_sysctl_tdh_handler - Handler function
< * Retrieves the TDH value from the hardware
< */
< static int
< ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
< {
< int error;
---
> return;
> } /* ixgbe_disable_intr */
4208,4299c3765,3767
< struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
< if (!txr) return 0;
<
< unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
< error = sysctl_handle_int(oidp, &val, 0, req);
< if (error || !req->newptr)
< return error;
< return 0;
< }
<
< /** ixgbe_sysctl_tdt_handler - Handler function
< * Retrieves the TDT value from the hardware
< */
< static int
< ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
< {
< int error;
<
< struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
< if (!txr) return 0;
<
< unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
< error = sysctl_handle_int(oidp, &val, 0, req);
< if (error || !req->newptr)
< return error;
< return 0;
< }
<
< /** ixgbe_sysctl_rdh_handler - Handler function
< * Retrieves the RDH value from the hardware
< */
< static int
< ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
< {
< int error;
<
< struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
< if (!rxr) return 0;
<
< unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
< error = sysctl_handle_int(oidp, &val, 0, req);
< if (error || !req->newptr)
< return error;
< return 0;
< }
<
< /** ixgbe_sysctl_rdt_handler - Handler function
< * Retrieves the RDT value from the hardware
< */
< static int
< ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
< {
< int error;
<
< struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
< if (!rxr) return 0;
<
< unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
< error = sysctl_handle_int(oidp, &val, 0, req);
< if (error || !req->newptr)
< return error;
< return 0;
< }
<
< static int
< ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
< {
< int error;
< struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
< unsigned int reg, usec, rate;
<
< reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
< usec = ((reg & 0x0FF8) >> 3);
< if (usec > 0)
< rate = 500000 / usec;
< else
< rate = 0;
< error = sysctl_handle_int(oidp, &rate, 0, req);
< if (error || !req->newptr)
< return error;
< reg &= ~0xfff; /* default, no limitation */
< ixgbe_max_interrupt_rate = 0;
< if (rate > 0 && rate < 500000) {
< if (rate < 1000)
< rate = 1000;
< ixgbe_max_interrupt_rate = rate;
< reg |= ((4000000/rate) & 0xff8 );
< }
< IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
< return 0;
< }
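/*
 * Illustration: the removed sysctl handler above converts between an
 * interrupts-per-second rate and the EITR interval field in bits 3..11;
 * its 500000/4000000 constants imply the field counts in roughly 2 us
 * units.  The standalone sketch below reproduces just that arithmetic;
 * eitr_to_rate and rate_to_eitr are illustrative names.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned
eitr_to_rate(uint32_t reg)
{
	unsigned interval = (reg & 0x0FF8) >> 3;       /* raw field value */

	return (interval > 0 ? 500000 / interval : 0); /* interrupts per second */
}

static uint32_t
rate_to_eitr(unsigned rate)
{
	if (rate == 0 || rate >= 500000)
		return (0);                    /* no limit */
	if (rate < 1000)
		rate = 1000;                   /* clamp as the handler does */
	return ((4000000 / rate) & 0xff8);     /* == (500000 / rate) << 3 */
}

int
main(void)
{
	uint32_t reg = rate_to_eitr(8000);

	printf("rate 8000 -> EITR 0x%03x -> rate %u\n", (unsigned)reg,
	    eitr_to_rate(reg));
	return (0);
}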
<
---
> /************************************************************************
> * ixgbe_legacy_irq - Legacy Interrupt Service routine
> ************************************************************************/
4301c3769
< ixgbe_add_device_sysctls(struct adapter *adapter)
---
> ixgbe_legacy_irq(void *arg)
4303c3771,3772
< device_t dev = adapter->dev;
---
> struct ix_queue *que = arg;
> struct adapter *adapter = que->adapter;
4305,4306c3774,3777
< struct sysctl_oid_list *child;
< struct sysctl_ctx_list *ctx;
---
> struct ifnet *ifp = adapter->ifp;
> struct tx_ring *txr = adapter->tx_rings;
> bool more = false;
> u32 eicr, eicr_mask;
4308,4309c3779,3780
< ctx = device_get_sysctl_ctx(dev);
< child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
---
> /* Silicon errata #26 on 82598 */
> IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
4311,4314c3782
< /* Sysctls for all devices */
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
---
> eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4316,4318c3784,3788
< SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
< CTLFLAG_RW,
< &ixgbe_enable_aim, 1, "Interrupt Moderation");
---
> ++que->irqs;
> if (eicr == 0) {
> ixgbe_enable_intr(adapter);
> return;
> }
4320,4322c3790,3791
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
---
> if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
> more = ixgbe_rxeof(que);
4324,4326c3793,3798
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_thermal_test, "I", "Thermal Test");
---
> IXGBE_TX_LOCK(txr);
> ixgbe_txeof(txr);
> if (!ixgbe_ring_empty(ifp, txr->br))
> ixgbe_start_locked(ifp, txr);
> IXGBE_TX_UNLOCK(txr);
> }
4328,4332c3800,3804
< #ifdef IXGBE_DEBUG
< /* testing sysctls (for all devices) */
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_power_state, "I", "PCI Power State");
---
> /* Check for fan failure */
> if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
> ixgbe_check_fan_failure(adapter, eicr, true);
> IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
> }
4334,4342c3806,3808
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
< CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
< ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
< #endif
< /* for X550 series devices */
< if (hw->mac.type >= ixgbe_mac_X550)
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_dmac, "I", "DMA Coalesce");
---
> /* Link status change */
> if (eicr & IXGBE_EICR_LSC)
> taskqueue_enqueue(adapter->tq, &adapter->link_task);
4344,4347c3810,3815
< /* for X552 backplane devices */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
< struct sysctl_oid *eee_node;
< struct sysctl_oid_list *eee_list;
---
> if (ixgbe_is_sfp(hw)) {
> /* Pluggable optics-related interrupt */
> if (hw->mac.type >= ixgbe_mac_X540)
> eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
> else
> eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4349,4352c3817,3820
< eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
< CTLFLAG_RD, NULL,
< "Energy Efficient Ethernet sysctls");
< eee_list = SYSCTL_CHILDREN(eee_node);
---
> if (eicr & eicr_mask) {
> IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
> taskqueue_enqueue(adapter->tq, &adapter->mod_task);
> }
4354,4377c3822,3827
< SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_eee_enable, "I",
< "Enable or Disable EEE");
<
< SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
< CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
< ixgbe_sysctl_eee_negotiated, "I",
< "EEE negotiated on link");
<
< SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
< CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
< ixgbe_sysctl_eee_tx_lpi_status, "I",
< "Whether or not TX link is in LPI state");
<
< SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
< CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
< ixgbe_sysctl_eee_rx_lpi_status, "I",
< "Whether or not RX link is in LPI state");
<
< SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
< CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
< ixgbe_sysctl_eee_tx_lpi_delay, "I",
< "TX LPI entry delay in microseconds");
---
> if ((hw->mac.type == ixgbe_mac_82599EB) &&
> (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
> IXGBE_WRITE_REG(hw, IXGBE_EICR,
> IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
> taskqueue_enqueue(adapter->tq, &adapter->msf_task);
> }
4380,4385c3830,3833
< /* for WoL-capable devices */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_wol_enable, "I",
< "Enable/Disable Wake on LAN");
---
> /* External PHY interrupt */
> if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
> (eicr & IXGBE_EICR_GPI_SDP0_X540))
> taskqueue_enqueue(adapter->tq, &adapter->phy_task);
4387,4391c3835,3838
< SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
< CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
< ixgbe_sysctl_wufc, "I",
< "Enable/Disable Wake Up Filters");
< }
---
> if (more)
> taskqueue_enqueue(que->tq, &que->que_task);
> else
> ixgbe_enable_intr(adapter);
4393,4396c3840,3841
< /* for X552/X557-AT devices */
< if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
< struct sysctl_oid *phy_node;
< struct sysctl_oid_list *phy_list;
---
> return;
> } /* ixgbe_legacy_irq */
4398,4417c3843,3845
< phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
< CTLFLAG_RD, NULL,
< "External PHY sysctls");
< phy_list = SYSCTL_CHILDREN(phy_node);
<
< SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
< CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
< ixgbe_sysctl_phy_temp, "I",
< "Current External PHY Temperature (Celsius)");
<
< SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
< CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
< ixgbe_sysctl_phy_overtemp_occurred, "I",
< "External PHY High Temperature Event Occurred");
< }
< }
<
< /*
< * Add sysctl variables, one per statistic, to the system.
< */
---
> /************************************************************************
> * ixgbe_free_pci_resources
> ************************************************************************/
4419c3847
< ixgbe_add_hw_stats(struct adapter *adapter)
---
> ixgbe_free_pci_resources(struct adapter *adapter)
4421c3849,3851
< device_t dev = adapter->dev;
---
> struct ix_queue *que = adapter->queues;
> device_t dev = adapter->dev;
> int rid, memrid;
4423,4424c3853,3856
< struct tx_ring *txr = adapter->tx_rings;
< struct rx_ring *rxr = adapter->rx_rings;
---
> if (adapter->hw.mac.type == ixgbe_mac_82598EB)
> memrid = PCIR_BAR(MSIX_82598_BAR);
> else
> memrid = PCIR_BAR(MSIX_82599_BAR);
4426,4429c3858,3867
< struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
< struct sysctl_oid *tree = device_get_sysctl_tree(dev);
< struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
< struct ixgbe_hw_stats *stats = &adapter->stats.pf;
---
> /*
> * There is a slight possibility of a failure mode
> * in attach that will result in entering this function
> * before interrupt resources have been initialized, and
> * in that case we do not want to execute the loops below.
> * We can detect this reliably by the state of the adapter
> * res pointer.
> */
> if (adapter->res == NULL)
> goto mem;
4431,4487c3869,3879
< struct sysctl_oid *stat_node, *queue_node;
< struct sysctl_oid_list *stat_list, *queue_list;
<
< #define QUEUE_NAME_LEN 32
< char namebuf[QUEUE_NAME_LEN];
<
< /* Driver Statistics */
< SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
< CTLFLAG_RD, &adapter->dropped_pkts,
< "Driver dropped packets");
< SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
< CTLFLAG_RD, &adapter->mbuf_defrag_failed,
< "m_defrag() failed");
< SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
< CTLFLAG_RD, &adapter->watchdog_events,
< "Watchdog timeouts");
< SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
< CTLFLAG_RD, &adapter->link_irq,
< "Link MSIX IRQ Handled");
<
< for (int i = 0; i < adapter->num_queues; i++, txr++) {
< snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
< queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
< CTLFLAG_RD, NULL, "Queue Name");
< queue_list = SYSCTL_CHILDREN(queue_node);
<
< SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
< CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
< sizeof(&adapter->queues[i]),
< ixgbe_sysctl_interrupt_rate_handler, "IU",
< "Interrupt Rate");
< SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
< CTLFLAG_RD, &(adapter->queues[i].irqs),
< "irqs on this queue");
< SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
< CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
< ixgbe_sysctl_tdh_handler, "IU",
< "Transmit Descriptor Head");
< SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
< CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
< ixgbe_sysctl_tdt_handler, "IU",
< "Transmit Descriptor Tail");
< SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
< CTLFLAG_RD, &txr->tso_tx,
< "TSO");
< SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
< CTLFLAG_RD, &txr->no_tx_dma_setup,
< "Driver tx dma failure in xmit");
< SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
< CTLFLAG_RD, &txr->no_desc_avail,
< "Queue No Descriptor Available");
< SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
< CTLFLAG_RD, &txr->total_packets,
< "Queue Packets Transmitted");
< SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
< CTLFLAG_RD, &txr->br->br_drops,
< "Packets dropped in buf_ring");
---
> /*
> * Release all msix queue resources:
> */
> for (int i = 0; i < adapter->num_queues; i++, que++) {
> rid = que->msix + 1;
> if (que->tag != NULL) {
> bus_teardown_intr(dev, que->res, que->tag);
> que->tag = NULL;
> }
> if (que->res != NULL)
> bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
4490,4494d3881
< for (int i = 0; i < adapter->num_queues; i++, rxr++) {
< snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
< queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
< CTLFLAG_RD, NULL, "Queue Name");
< queue_list = SYSCTL_CHILDREN(queue_node);
4496,4525c3883,3885
< struct lro_ctrl *lro = &rxr->lro;
<
< snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
< queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
< CTLFLAG_RD, NULL, "Queue Name");
< queue_list = SYSCTL_CHILDREN(queue_node);
<
< SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
< CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
< ixgbe_sysctl_rdh_handler, "IU",
< "Receive Descriptor Head");
< SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
< CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
< ixgbe_sysctl_rdt_handler, "IU",
< "Receive Descriptor Tail");
< SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
< CTLFLAG_RD, &rxr->rx_packets,
< "Queue Packets Received");
< SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
< CTLFLAG_RD, &rxr->rx_bytes,
< "Queue Bytes Received");
< SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
< CTLFLAG_RD, &rxr->rx_copies,
< "Copied RX Frames");
< SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
< CTLFLAG_RD, &lro->lro_queued, 0,
< "LRO Queued");
< SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
< CTLFLAG_RD, &lro->lro_flushed, 0,
< "LRO Flushed");
---
> if (adapter->tag != NULL) {
> bus_teardown_intr(dev, adapter->res, adapter->tag);
> adapter->tag = NULL;
4528c3888,3891
< /* MAC stats get the own sub node */
---
> /* Clean the Legacy or Link interrupt last */
> if (adapter->res != NULL)
> bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
> adapter->res);
4530,4532c3893,3896
< stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
< CTLFLAG_RD, NULL, "MAC Statistics");
< stat_list = SYSCTL_CHILDREN(stat_node);
---
> mem:
> if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
> (adapter->feat_en & IXGBE_FEATURE_MSIX))
> pci_release_msi(dev);
4534,4554c3898,3900
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
< CTLFLAG_RD, &stats->crcerrs,
< "CRC Errors");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
< CTLFLAG_RD, &stats->illerrc,
< "Illegal Byte Errors");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
< CTLFLAG_RD, &stats->errbc,
< "Byte Errors");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
< CTLFLAG_RD, &stats->mspdc,
< "MAC Short Packets Discarded");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
< CTLFLAG_RD, &stats->mlfc,
< "MAC Local Faults");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
< CTLFLAG_RD, &stats->mrfc,
< "MAC Remote Faults");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
< CTLFLAG_RD, &stats->rlec,
< "Receive Length Errors");
---
> if (adapter->msix_mem != NULL)
> bus_release_resource(dev, SYS_RES_MEMORY, memrid,
> adapter->msix_mem);
4556,4568c3902,3904
< /* Flow Control stats */
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
< CTLFLAG_RD, &stats->lxontxc,
< "Link XON Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
< CTLFLAG_RD, &stats->lxonrxc,
< "Link XON Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
< CTLFLAG_RD, &stats->lxofftxc,
< "Link XOFF Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
< CTLFLAG_RD, &stats->lxoffrxc,
< "Link XOFF Received");
---
> if (adapter->pci_mem != NULL)
> bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
> adapter->pci_mem);
4570,4627c3906,3907
< /* Packet Reception Stats */
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
< CTLFLAG_RD, &stats->tor,
< "Total Octets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
< CTLFLAG_RD, &stats->gorc,
< "Good Octets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
< CTLFLAG_RD, &stats->tpr,
< "Total Packets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
< CTLFLAG_RD, &stats->gprc,
< "Good Packets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
< CTLFLAG_RD, &stats->mprc,
< "Multicast Packets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
< CTLFLAG_RD, &stats->bprc,
< "Broadcast Packets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
< CTLFLAG_RD, &stats->prc64,
< "64 byte frames received ");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
< CTLFLAG_RD, &stats->prc127,
< "65-127 byte frames received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
< CTLFLAG_RD, &stats->prc255,
< "128-255 byte frames received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
< CTLFLAG_RD, &stats->prc511,
< "256-511 byte frames received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
< CTLFLAG_RD, &stats->prc1023,
< "512-1023 byte frames received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
< CTLFLAG_RD, &stats->prc1522,
< "1023-1522 byte frames received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
< CTLFLAG_RD, &stats->ruc,
< "Receive Undersized");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
< CTLFLAG_RD, &stats->rfc,
< "Fragmented Packets Received ");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
< CTLFLAG_RD, &stats->roc,
< "Oversized Packets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
< CTLFLAG_RD, &stats->rjc,
< "Received Jabber");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
< CTLFLAG_RD, &stats->mngprc,
< "Management Packets Received");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
< CTLFLAG_RD, &stats->mngptc,
< "Management Packets Dropped");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
< CTLFLAG_RD, &stats->xec,
< "Checksum Errors");
---
> return;
> } /* ixgbe_free_pci_resources */
4629,4667c3909,3911
< /* Packet Transmission Stats */
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
< CTLFLAG_RD, &stats->gotc,
< "Good Octets Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
< CTLFLAG_RD, &stats->tpt,
< "Total Packets Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
< CTLFLAG_RD, &stats->gptc,
< "Good Packets Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
< CTLFLAG_RD, &stats->bptc,
< "Broadcast Packets Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
< CTLFLAG_RD, &stats->mptc,
< "Multicast Packets Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
< CTLFLAG_RD, &stats->mngptc,
< "Management Packets Transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
< CTLFLAG_RD, &stats->ptc64,
< "64 byte frames transmitted ");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
< CTLFLAG_RD, &stats->ptc127,
< "65-127 byte frames transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
< CTLFLAG_RD, &stats->ptc255,
< "128-255 byte frames transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
< CTLFLAG_RD, &stats->ptc511,
< "256-511 byte frames transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
< CTLFLAG_RD, &stats->ptc1023,
< "512-1023 byte frames transmitted");
< SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
< CTLFLAG_RD, &stats->ptc1522,
< "1024-1522 byte frames transmitted");
< }
<
---
> /************************************************************************
> * ixgbe_set_sysctl_value
> ************************************************************************/
4676c3920
< }
---
> } /* ixgbe_set_sysctl_value */
4678,4685c3922,3926
< /*
< ** Set flow control using sysctl:
< ** Flow control values:
< ** 0 - off
< ** 1 - rx pause
< ** 2 - tx pause
< ** 3 - full
< */
---
> /************************************************************************
> * ixgbe_sysctl_flowcntl
> *
> * SYSCTL wrapper around setting Flow Control
> ************************************************************************/
4689d3929
< int error, fc;
4690a3931
> int error, fc;
4692,4693c3933,3934
< adapter = (struct adapter *) arg1;
< fc = adapter->fc;
---
> adapter = (struct adapter *)arg1;
> fc = adapter->hw.fc.current_mode;
4700c3941
< if (adapter->fc == fc)
---
> if (fc == adapter->hw.fc.current_mode)
4704c3945
< }
---
> } /* ixgbe_sysctl_flowcntl */
4706c3947,3955
<
---
> /************************************************************************
> * ixgbe_set_flowcntl - Set flow control
> *
> * Flow control values:
> * 0 - off
> * 1 - rx pause
> * 2 - tx pause
> * 3 - full
> ************************************************************************/
4710d3958
<
4715c3963
< adapter->hw.fc.requested_mode = adapter->fc;
---
> adapter->hw.fc.requested_mode = fc;
4727c3975
< adapter->fc = fc;
---
>
4730a3979
>
4732c3981
< }
---
> } /* ixgbe_set_flowcntl */
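A minimal userland sketch of driving the flow-control handler above through sysctlbyname(3). The OID name "dev.ix.0.fc" is an assumption here; the sysctl node itself is registered elsewhere in the driver, outside this hunk.

#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	int fc = 3;	/* 0 = off, 1 = rx pause, 2 = tx pause, 3 = full */

	/* Assumed OID name; the node is created elsewhere in if_ix.c. */
	if (sysctlbyname("dev.ix.0.fc", NULL, NULL, &fc, sizeof(fc)) == -1)
		err(1, "sysctlbyname(dev.ix.0.fc)");
	printf("requested flow control mode %d\n", fc);
	return (0);
}

From a shell, "sysctl dev.ix.0.fc=3" would do the same, under the same naming assumption.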
4734,4740c3983,4042
< /*
< ** Control advertised link speed:
< ** Flags:
< ** 0x1 - advertise 100 Mb
< ** 0x2 - advertise 1G
< ** 0x4 - advertise 10G
< */
---
> /************************************************************************
> * ixgbe_enable_rx_drop
> *
> * Enable the hardware to drop packets when the buffer is
> * full. This is useful with multiqueue, so that no single
> * queue being full stalls the entire RX engine. We only
> * enable this when Multiqueue is enabled AND Flow Control
> * is disabled.
> ************************************************************************/
> static void
> ixgbe_enable_rx_drop(struct adapter *adapter)
> {
> struct ixgbe_hw *hw = &adapter->hw;
> struct rx_ring *rxr;
> u32 srrctl;
>
> for (int i = 0; i < adapter->num_queues; i++) {
> rxr = &adapter->rx_rings[i];
> srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
> srrctl |= IXGBE_SRRCTL_DROP_EN;
> IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
> }
>
> /* enable drop for each vf */
> for (int i = 0; i < adapter->num_vfs; i++) {
> IXGBE_WRITE_REG(hw, IXGBE_QDE,
> (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
> IXGBE_QDE_ENABLE));
> }
> } /* ixgbe_enable_rx_drop */
>
> /************************************************************************
> * ixgbe_disable_rx_drop
> ************************************************************************/
> static void
> ixgbe_disable_rx_drop(struct adapter *adapter)
> {
> struct ixgbe_hw *hw = &adapter->hw;
> struct rx_ring *rxr;
> u32 srrctl;
>
> for (int i = 0; i < adapter->num_queues; i++) {
> rxr = &adapter->rx_rings[i];
> srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
> srrctl &= ~IXGBE_SRRCTL_DROP_EN;
> IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
> }
>
> /* disable drop for each vf */
> for (int i = 0; i < adapter->num_vfs; i++) {
> IXGBE_WRITE_REG(hw, IXGBE_QDE,
> (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
> }
> } /* ixgbe_disable_rx_drop */
>
> /************************************************************************
> * ixgbe_sysctl_advertise
> *
> * SYSCTL wrapper around setting advertised speed
> ************************************************************************/
4744d4045
< int error, advertise;
4745a4047
> int error, advertise;
4747c4049
< adapter = (struct adapter *) arg1;
---
> adapter = (struct adapter *)arg1;
4755c4057
< }
---
> } /* ixgbe_sysctl_advertise */
4756a4059,4067
> /************************************************************************
> * ixgbe_set_advertise - Control advertised link speed
> *
> * Flags:
> * 0x1 - advertise 100 Mb
> * 0x2 - advertise 1G
> * 0x4 - advertise 10G
> * 0x8 - advertise 10 Mb (yes, Mb)
> ************************************************************************/
4760,4762c4071,4076
< device_t dev;
< struct ixgbe_hw *hw;
< ixgbe_link_speed speed;
---
> device_t dev;
> struct ixgbe_hw *hw;
> ixgbe_link_speed speed = 0;
> ixgbe_link_speed link_caps = 0;
> s32 err = IXGBE_NOT_IMPLEMENTED;
> bool negotiate = FALSE;
4768d4081
< hw = &adapter->hw;
4769a4083
> hw = &adapter->hw;
4776,4779c4090,4091
< (hw->phy.multispeed_fiber))) {
< device_printf(dev,
< "Advertised speed can only be set on copper or "
< "multispeed fiber media types.\n");
---
> (hw->phy.multispeed_fiber))) {
> device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4783,4785c4095,4096
< if (advertise < 0x1 || advertise > 0x7) {
< device_printf(dev,
< "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
---
> if (advertise < 0x1 || advertise > 0xF) {
> device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4789,4793c4100,4106
< if ((advertise & 0x1)
< && (hw->mac.type != ixgbe_mac_X540)
< && (hw->mac.type != ixgbe_mac_X550)) {
< device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
< return (EINVAL);
---
> if (hw->mac.ops.get_link_capabilities) {
> err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
> &negotiate);
> if (err != IXGBE_SUCCESS) {
> device_printf(dev, "Unable to determine supported advertise speeds\n");
> return (ENODEV);
> }
4797,4798c4110,4114
< speed = 0;
< if (advertise & 0x1)
---
> if (advertise & 0x1) {
> if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
> device_printf(dev, "Interface does not support 100Mb advertised speed\n");
> return (EINVAL);
> }
4800c4116,4121
< if (advertise & 0x2)
---
> }
> if (advertise & 0x2) {
> if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
> device_printf(dev, "Interface does not support 1Gb advertised speed\n");
> return (EINVAL);
> }
4802c4123,4128
< if (advertise & 0x4)
---
> }
> if (advertise & 0x4) {
> if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
> device_printf(dev, "Interface does not support 10Gb advertised speed\n");
> return (EINVAL);
> }
4804c4130,4137
< adapter->advertise = advertise;
---
> }
> if (advertise & 0x8) {
> if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
> device_printf(dev, "Interface does not support 10Mb advertised speed\n");
> return (EINVAL);
> }
> speed |= IXGBE_LINK_SPEED_10_FULL;
> }
4807a4141
> adapter->advertise = advertise;
4810c4144
< }
---
> } /* ixgbe_set_advertise */
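The rewritten ixgbe_set_advertise() now checks each requested flag against the PHY's reported link_caps and adds 0x8 for 10 Mb. A sketch of reading, decoding, and rewriting that mask from userland; the OID name "dev.ix.0.advertise_speed" is assumed, and the flag values follow the header above.

#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	int adv;
	size_t len = sizeof(adv);

	/* Assumed OID name for the advertise sysctl shown above. */
	if (sysctlbyname("dev.ix.0.advertise_speed", &adv, &len, NULL, 0) == -1)
		err(1, "read advertise_speed");
	printf("current mask 0x%x:%s%s%s%s\n", adv,
	    (adv & 0x8) ? " 10Mb" : "",
	    (adv & 0x1) ? " 100Mb" : "",
	    (adv & 0x2) ? " 1Gb" : "",
	    (adv & 0x4) ? " 10Gb" : "");

	adv = 0x2 | 0x4;	/* advertise 1G and 10G only */
	if (sysctlbyname("dev.ix.0.advertise_speed", NULL, NULL, &adv,
	    sizeof(adv)) == -1)
		err(1, "write advertise_speed");
	return (0);
}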
4812,4815c4146,4155
< /*
< * The following two sysctls are for X552/X557-AT devices;
< * they deal with the external PHY used in them.
< */
---
> /************************************************************************
> * ixgbe_get_advertise - Get current advertised speed settings
> *
> * Formatted for sysctl usage.
> * Flags:
> * 0x1 - advertise 100 Mb
> * 0x2 - advertise 1G
> * 0x4 - advertise 10G
> * 0x8 - advertise 10 Mb (yes, Mb)
> ************************************************************************/
4817c4157
< ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
---
> ixgbe_get_advertise(struct adapter *adapter)
4819,4821c4159,4163
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< u16 reg;
---
> struct ixgbe_hw *hw = &adapter->hw;
> int speed;
> ixgbe_link_speed link_caps = 0;
> s32 err;
> bool negotiate = FALSE;
4823,4827c4165,4171
< if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
< device_printf(adapter->dev,
< "Device has no supported external thermal sensor.\n");
< return (ENODEV);
< }
---
> /*
> * Advertised speed means nothing unless it's copper or
> * multi-speed fiber
> */
> if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
> !(hw->phy.multispeed_fiber))
> return (0);
4829,4835c4173,4175
< if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
< IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
< &reg)) {
< device_printf(adapter->dev,
< "Error reading from PHY's current temperature register\n");
< return (EAGAIN);
< }
---
> err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
> if (err != IXGBE_SUCCESS)
> return (0);
4837,4838c4177,4181
< /* Shift temp for output */
< reg = reg >> 8;
---
> speed =
> ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
> ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) |
> ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) |
> ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0);
4840,4841c4183,4184
< return (sysctl_handle_int(oidp, NULL, reg, req));
< }
---
> return speed;
> } /* ixgbe_get_advertise */
4843,4847c4186,4196
< /*
< * Reports whether the current PHY temperature is over
< * the overtemp threshold.
< * - This is reported directly from the PHY
< */
---
> /************************************************************************
> * ixgbe_sysctl_dmac - Manage DMA Coalescing
> *
> * Control values:
> * 0/1 - off / on (use default value of 1000)
> *
> * Legal timer values are:
> * 50,100,250,500,1000,2000,5000,10000
> *
> * Turning off interrupt moderation will also turn this off.
> ************************************************************************/
4849,4908d4197
< ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
< {
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< u16 reg;
<
< if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
< device_printf(adapter->dev,
< "Device has no supported external thermal sensor.\n");
< return (ENODEV);
< }
<
< if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
< IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
< &reg)) {
< device_printf(adapter->dev,
< "Error reading from PHY's temperature status register\n");
< return (EAGAIN);
< }
<
< /* Get occurrence bit */
< reg = !!(reg & 0x4000);
< return (sysctl_handle_int(oidp, 0, reg, req));
< }
<
< /*
< ** Thermal Shutdown Trigger (internal MAC)
< ** - Set this to 1 to cause an overtemp event to occur
< */
< static int
< ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
< {
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< int error, fire = 0;
<
< error = sysctl_handle_int(oidp, &fire, 0, req);
< if ((error) || (req->newptr == NULL))
< return (error);
<
< if (fire) {
< u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
< reg |= IXGBE_EICR_TS;
< IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
< }
<
< return (0);
< }
<
< /*
< ** Manage DMA Coalescing.
< ** Control values:
< ** 0/1 - off / on (use default value of 1000)
< **
< ** Legal timer values are:
< ** 50,100,250,500,1000,2000,5000,10000
< **
< ** Turning off interrupt moderation will also turn this off.
< */
< static int
4911,4914c4200,4203
< struct adapter *adapter = (struct adapter *) arg1;
< struct ifnet *ifp = adapter->ifp;
< int error;
< u32 newval;
---
> struct adapter *adapter = (struct adapter *)arg1;
> struct ifnet *ifp = adapter->ifp;
> int error;
> u32 newval;
4951c4240
< }
---
> } /* ixgbe_sysctl_dmac */
4954,4960c4243,4251
< /**
< * Sysctl to test power states
< * Values:
< * 0 - set device to D0
< * 3 - set device to D3
< * (none) - get current device power state
< */
---
> /************************************************************************
> * ixgbe_sysctl_power_state
> *
> * Sysctl to test power states
> * Values:
> * 0 - set device to D0
> * 3 - set device to D3
> * (none) - get current device power state
> ************************************************************************/
4964,4966c4255,4257
< struct adapter *adapter = (struct adapter *) arg1;
< device_t dev = adapter->dev;
< int curr_ps, new_ps, error = 0;
---
> struct adapter *adapter = (struct adapter *)arg1;
> device_t dev = adapter->dev;
> int curr_ps, new_ps, error = 0;
4987c4278
< }
---
> } /* ixgbe_sysctl_power_state */
4989,4994c4280,4290
< /*
< * Sysctl to enable/disable the WoL capability, if supported by the adapter.
< * Values:
< * 0 - disabled
< * 1 - enabled
< */
---
>
> /************************************************************************
> * ixgbe_sysctl_wol_enable
> *
> * Sysctl to enable/disable the WoL capability,
> * if supported by the adapter.
> *
> * Values:
> * 0 - disabled
> * 1 - enabled
> ************************************************************************/
4998c4294
< struct adapter *adapter = (struct adapter *) arg1;
---
> struct adapter *adapter = (struct adapter *)arg1;
5000,5001c4296,4297
< int new_wol_enabled;
< int error = 0;
---
> int new_wol_enabled;
> int error = 0;
5017c4313
< }
---
> } /* ixgbe_sysctl_wol_enable */
5019,5129c4315,4316
< /*
< * Sysctl to enable/disable the Energy Efficient Ethernet capability,
< * if supported by the adapter.
< * Values:
< * 0 - disabled
< * 1 - enabled
< */
< static int
< ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
< {
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< struct ifnet *ifp = adapter->ifp;
< int new_eee_enabled, error = 0;
<
< new_eee_enabled = adapter->eee_enabled;
< error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
< if ((error) || (req->newptr == NULL))
< return (error);
< new_eee_enabled = !!(new_eee_enabled);
< if (new_eee_enabled == adapter->eee_enabled)
< return (0);
<
< if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
< return (ENODEV);
< else
< adapter->eee_enabled = new_eee_enabled;
<
< /* Re-initialize hardware if it's already running */
< if (ifp->if_drv_flags & IFF_DRV_RUNNING)
< ixgbe_init(adapter);
<
< return (0);
< }
<
< /*
< * Read-only sysctl indicating whether EEE support was negotiated
< * on the link.
< */
< static int
< ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
< {
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< bool status;
<
< status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
<
< return (sysctl_handle_int(oidp, 0, status, req));
< }
<
< /*
< * Read-only sysctl indicating whether RX Link is in LPI state.
< */
< static int
< ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
< {
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< bool status;
<
< status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
< IXGBE_EEE_RX_LPI_STATUS);
<
< return (sysctl_handle_int(oidp, 0, status, req));
< }
<
< /*
< * Read-only sysctl indicating whether TX Link is in LPI state.
< */
< static int
< ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
< {
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< bool status;
<
< status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
< IXGBE_EEE_TX_LPI_STATUS);
<
< return (sysctl_handle_int(oidp, 0, status, req));
< }
<
< /*
< * Read-only sysctl indicating TX Link LPI delay
< */
< static int
< ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
< {
< struct adapter *adapter = (struct adapter *) arg1;
< struct ixgbe_hw *hw = &adapter->hw;
< u32 reg;
<
< reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
<
< return (sysctl_handle_int(oidp, 0, reg >> 26, req));
< }
<
< /*
< * Sysctl to enable/disable the types of packets that the
< * adapter will wake up on upon receipt.
< * WUFC - Wake Up Filter Control
< * Flags:
< * 0x1 - Link Status Change
< * 0x2 - Magic Packet
< * 0x4 - Direct Exact
< * 0x8 - Directed Multicast
< * 0x10 - Broadcast
< * 0x20 - ARP/IPv4 Request Packet
< * 0x40 - Direct IPv4 Packet
< * 0x80 - Direct IPv6 Packet
---
> /************************************************************************
> * ixgbe_sysctl_wufc - Wake Up Filter Control
5131,5133c4318,4331
< * Setting another flag will cause the sysctl to return an
< * error.
< */
---
> * Sysctl to enable/disable the types of packets that the
> * adapter will wake up on upon receipt.
> * Flags:
> * 0x1 - Link Status Change
> * 0x2 - Magic Packet
> * 0x4 - Direct Exact
> * 0x8 - Directed Multicast
> * 0x10 - Broadcast
> * 0x20 - ARP/IPv4 Request Packet
> * 0x40 - Direct IPv4 Packet
> * 0x80 - Direct IPv6 Packet
> *
> * Settings not listed above will cause the sysctl to return an error.
> ************************************************************************/
5137,5139c4335,4337
< struct adapter *adapter = (struct adapter *) arg1;
< int error = 0;
< u32 new_wufc;
---
> struct adapter *adapter = (struct adapter *)arg1;
> int error = 0;
> u32 new_wufc;
5151,5155d4348
< else {
< new_wufc &= 0xff;
< new_wufc |= (0xffffff & adapter->wufc);
< adapter->wufc = new_wufc;
< }
5156a4350,4353
> new_wufc &= 0xff;
> new_wufc |= (0xffffff & adapter->wufc);
> adapter->wufc = new_wufc;
>
5158c4355
< }
---
> } /* ixgbe_sysctl_wufc */
5160a4358,4360
> /************************************************************************
> * ixgbe_sysctl_print_rss_config
> ************************************************************************/
5164c4364
< struct adapter *adapter = (struct adapter *)arg1;
---
> struct adapter *adapter = (struct adapter *)arg1;
5166,5169c4366,4369
< device_t dev = adapter->dev;
< int error = 0, reta_size;
< struct sbuf *buf;
< u32 reg;
---
> device_t dev = adapter->dev;
> struct sbuf *buf;
> int error = 0, reta_size;
> u32 reg;
5181a4382
> case ixgbe_mac_X550EM_a:
5207a4409
>
5209c4411
< }
---
> } /* ixgbe_sysctl_print_rss_config */
5212,5220c4414,4420
< /*
< ** Enable the hardware to drop packets when the buffer is
< ** full. This is useful when multiqueue,so that no single
< ** queue being full stalls the entire RX engine. We only
< ** enable this when Multiqueue AND when Flow Control is
< ** disabled.
< */
< static void
< ixgbe_enable_rx_drop(struct adapter *adapter)
---
> /************************************************************************
> * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
> *
> * For X552/X557-AT devices using an external PHY
> ************************************************************************/
> static int
> ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
5222c4422,4424
< struct ixgbe_hw *hw = &adapter->hw;
---
> struct adapter *adapter = (struct adapter *)arg1;
> struct ixgbe_hw *hw = &adapter->hw;
> u16 reg;
5224,5228c4426,4429
< for (int i = 0; i < adapter->num_queues; i++) {
< struct rx_ring *rxr = &adapter->rx_rings[i];
< u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
< srrctl |= IXGBE_SRRCTL_DROP_EN;
< IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
---
> if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
> device_printf(adapter->dev,
> "Device has no supported external thermal sensor.\n");
> return (ENODEV);
5230,5235c4431,4436
< #ifdef PCI_IOV
< /* enable drop for each vf */
< for (int i = 0; i < adapter->num_vfs; i++) {
< IXGBE_WRITE_REG(hw, IXGBE_QDE,
< (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
< IXGBE_QDE_ENABLE));
---
>
> if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
> IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
> device_printf(adapter->dev,
> "Error reading from PHY's current temperature register\n");
> return (EAGAIN);
5237,5238d4437
< #endif
< }
5240,5243c4439,4440
< static void
< ixgbe_disable_rx_drop(struct adapter *adapter)
< {
< struct ixgbe_hw *hw = &adapter->hw;
---
> /* Shift temp for output */
> reg = reg >> 8;
5245,5258c4442,4443
< for (int i = 0; i < adapter->num_queues; i++) {
< struct rx_ring *rxr = &adapter->rx_rings[i];
< u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
< srrctl &= ~IXGBE_SRRCTL_DROP_EN;
< IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
< }
< #ifdef PCI_IOV
< /* disable drop for each vf */
< for (int i = 0; i < adapter->num_vfs; i++) {
< IXGBE_WRITE_REG(hw, IXGBE_QDE,
< (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
< }
< #endif
< }
---
> return (sysctl_handle_int(oidp, NULL, reg, req));
> } /* ixgbe_sysctl_phy_temp */
5260,5261c4445,4452
< static void
< ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
---
> /************************************************************************
> * ixgbe_sysctl_phy_overtemp_occurred
> *
> * Reports (directly from the PHY) whether the current PHY
> * temperature is over the overtemp threshold.
> ************************************************************************/
> static int
> ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
5263c4454,4456
< u32 mask;
---
> struct adapter *adapter = (struct adapter *)arg1;
> struct ixgbe_hw *hw = &adapter->hw;
> u16 reg;
5265,5280c4458,4461
< switch (adapter->hw.mac.type) {
< case ixgbe_mac_82598EB:
< mask = (IXGBE_EIMS_RTX_QUEUE & queues);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
< break;
< case ixgbe_mac_82599EB:
< case ixgbe_mac_X540:
< case ixgbe_mac_X550:
< case ixgbe_mac_X550EM_x:
< mask = (queues & 0xFFFFFFFF);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
< mask = (queues >> 32);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
< break;
< default:
< break;
---
> if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
> device_printf(adapter->dev,
> "Device has no supported external thermal sensor.\n");
> return (ENODEV);
5282d4462
< }
5284,5298c4464,4468
< #ifdef PCI_IOV
<
< /*
< ** Support functions for SRIOV/VF management
< */
<
< static void
< ixgbe_ping_all_vfs(struct adapter *adapter)
< {
< struct ixgbe_vf *vf;
<
< for (int i = 0; i < adapter->num_vfs; i++) {
< vf = &adapter->vfs[i];
< if (vf->flags & IXGBE_VF_ACTIVE)
< ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
---
> if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
> IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
> device_printf(adapter->dev,
> "Error reading from PHY's temperature status register\n");
> return (EAGAIN);
5300d4469
< }
5301a4471,4472
> /* Get occurrence bit */
> reg = !!(reg & 0x4000);
5303,5305c4474,4487
< static void
< ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
< uint16_t tag)
---
> return (sysctl_handle_int(oidp, 0, reg, req));
> } /* ixgbe_sysctl_phy_overtemp_occurred */
>
> /************************************************************************
> * ixgbe_sysctl_eee_state
> *
> * Sysctl to set EEE power saving feature
> * Values:
> * 0 - disable EEE
> * 1 - enable EEE
> * (none) - get current device EEE state
> ************************************************************************/
> static int
> ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
5307,5308c4489,4492
< struct ixgbe_hw *hw;
< uint32_t vmolr, vmvir;
---
> struct adapter *adapter = (struct adapter *)arg1;
> device_t dev = adapter->dev;
> int curr_eee, new_eee, error = 0;
> s32 retval;
5310c4494
< hw = &adapter->hw;
---
> curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5312,5314c4496,4498
< vf->vlan_tag = tag;
<
< vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
---
> error = sysctl_handle_int(oidp, &new_eee, 0, req);
> if ((error) || (req->newptr == NULL))
> return (error);
5316,5317c4500,4502
< /* Do not receive packets that pass inexact filters. */
< vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
---
> /* Nothing to do */
> if (new_eee == curr_eee)
> return (0);
5319,5320c4504,4506
< /* Disable Multicast Promiscuous Mode. */
< vmolr &= ~IXGBE_VMOLR_MPE;
---
> /* Not supported */
> if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
> return (EINVAL);
5322,5323c4508,4510
< /* Accept broadcasts. */
< vmolr |= IXGBE_VMOLR_BAM;
---
> /* Bounds checking */
> if ((new_eee < 0) || (new_eee > 1))
> return (EINVAL);
5325,5336c4512,4515
< if (tag == 0) {
< /* Accept non-vlan tagged traffic. */
< //vmolr |= IXGBE_VMOLR_AUPE;
<
< /* Allow VM to tag outgoing traffic; no default tag. */
< vmvir = 0;
< } else {
< /* Require vlan-tagged traffic. */
< vmolr &= ~IXGBE_VMOLR_AUPE;
<
< /* Tag all traffic with provided vlan tag. */
< vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
---
> retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
> if (retval) {
> device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
> return (EINVAL);
5338,5340d4516
< IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
< IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
< }
5341a4518,4519
> /* Restart auto-neg */
> ixgbe_init(adapter);
5343,5345c4521
< static boolean_t
< ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
< {
---
> device_printf(dev, "New EEE state: %d\n", new_eee);
5347,5353c4523,4527
< /*
< * Frame size compatibility between PF and VF is only a problem on
< * 82599-based cards. X540 and later support any combination of jumbo
< * frames on PFs and VFs.
< */
< if (adapter->hw.mac.type != ixgbe_mac_82599EB)
< return (TRUE);
---
> /* Cache new value */
> if (new_eee)
> adapter->feat_en |= IXGBE_FEATURE_EEE;
> else
> adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5355,5364c4529,4530
< switch (vf->api_ver) {
< case IXGBE_API_VER_1_0:
< case IXGBE_API_VER_UNKNOWN:
< /*
< * On legacy (1.0 and older) VF versions, we don't support jumbo
< * frames on either the PF or the VF.
< */
< if (adapter->max_frame_size > ETHER_MAX_LEN ||
< vf->max_frame_size > ETHER_MAX_LEN)
< return (FALSE);
---
> return (error);
> } /* ixgbe_sysctl_eee_state */
5366c4532,4543
< return (TRUE);
---
> /************************************************************************
> * ixgbe_init_device_features
> ************************************************************************/
> static void
> ixgbe_init_device_features(struct adapter *adapter)
> {
> adapter->feat_cap = IXGBE_FEATURE_NETMAP
> | IXGBE_FEATURE_RSS
> | IXGBE_FEATURE_MSI
> | IXGBE_FEATURE_MSIX
> | IXGBE_FEATURE_LEGACY_IRQ
> | IXGBE_FEATURE_LEGACY_TX;
5367a4545,4549
> /* Set capabilities first... */
> switch (adapter->hw.mac.type) {
> case ixgbe_mac_82598EB:
> if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
> adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5369c4551,4587
< case IXGBE_API_VER_1_1:
---
> case ixgbe_mac_X540:
> adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
> adapter->feat_cap |= IXGBE_FEATURE_FDIR;
> if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
> (adapter->hw.bus.func == 0))
> adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
> break;
> case ixgbe_mac_X550:
> adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
> adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
> adapter->feat_cap |= IXGBE_FEATURE_FDIR;
> break;
> case ixgbe_mac_X550EM_x:
> adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
> adapter->feat_cap |= IXGBE_FEATURE_FDIR;
> if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
> adapter->feat_cap |= IXGBE_FEATURE_EEE;
> break;
> case ixgbe_mac_X550EM_a:
> adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
> adapter->feat_cap |= IXGBE_FEATURE_FDIR;
> adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
> if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
> (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
> adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
> adapter->feat_cap |= IXGBE_FEATURE_EEE;
> }
> break;
> case ixgbe_mac_82599EB:
> adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
> adapter->feat_cap |= IXGBE_FEATURE_FDIR;
> if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
> (adapter->hw.bus.func == 0))
> adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
> if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
> adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
> break;
5371,5376c4589,4590
< /*
< * 1.1 or later VF versions always work if they aren't using
< * jumbo frames.
< */
< if (vf->max_frame_size <= ETHER_MAX_LEN)
< return (TRUE);
---
> break;
> }
5378,5383c4592,4604
< /*
< * Jumbo frames only work with VFs if the PF is also using jumbo
< * frames.
< */
< if (adapter->max_frame_size <= ETHER_MAX_LEN)
< return (TRUE);
---
> /* Enabled by default... */
> /* Fan failure detection */
> if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
> adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
> /* Netmap */
> if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
> adapter->feat_en |= IXGBE_FEATURE_NETMAP;
> /* EEE */
> if (adapter->feat_cap & IXGBE_FEATURE_EEE)
> adapter->feat_en |= IXGBE_FEATURE_EEE;
> /* Thermal Sensor */
> if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
> adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5385,5386c4606,4612
< return (FALSE);
<
---
> /* Enabled via global sysctl... */
> /* Flow Director */
> if (ixgbe_enable_fdir) {
> if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
> adapter->feat_en |= IXGBE_FEATURE_FDIR;
> else
> device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
5388c4614,4626
< }
---
> /* Legacy (single queue) transmit */
> if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
> ixgbe_enable_legacy_tx)
> adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
> /*
> * Message Signal Interrupts - Extended (MSI-X)
> * Normal MSI is only enabled if MSI-X calls fail.
> */
> if (!ixgbe_enable_msix)
> adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
> /* Receive-Side Scaling (RSS) */
> if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
> adapter->feat_en |= IXGBE_FEATURE_RSS;
5389a4628,4636
> /* Disable features with unmet dependencies... */
> /* No MSI-X */
> if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
> adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
> adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
> adapter->feat_en &= ~IXGBE_FEATURE_RSS;
> adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
> }
> } /* ixgbe_init_device_features */
5391,5392c4638,4647
< static void
< ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
---
> /************************************************************************
> * ixgbe_probe - Device identification routine
> *
> * Determines if the driver should be loaded on
> * adapter based on its PCI vendor/device ID.
> *
> * return BUS_PROBE_DEFAULT on success, positive on failure
> ************************************************************************/
> static int
> ixgbe_probe(device_t dev)
5394c4649
< ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
---
> ixgbe_vendor_info_t *ent;
5396c4651,4655
< // XXX clear multicast addresses
---
> u16 pci_vendor_id = 0;
> u16 pci_device_id = 0;
> u16 pci_subvendor_id = 0;
> u16 pci_subdevice_id = 0;
> char adapter_name[256];
5398c4657
< ixgbe_clear_rar(&adapter->hw, vf->rar_index);
---
> INIT_DEBUGOUT("ixgbe_probe: begin");
5400,5401c4659,4661
< vf->api_ver = IXGBE_API_VER_UNKNOWN;
< }
---
> pci_vendor_id = pci_get_vendor(dev);
> if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
> return (ENXIO);
5402a4663,4665
> pci_device_id = pci_get_device(dev);
> pci_subvendor_id = pci_get_subvendor(dev);
> pci_subdevice_id = pci_get_subdevice(dev);
5404,5408c4667,4683
< static void
< ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
< {
< struct ixgbe_hw *hw;
< uint32_t vf_index, vfte;
---
> ent = ixgbe_vendor_info_array;
> while (ent->vendor_id != 0) {
> if ((pci_vendor_id == ent->vendor_id) &&
> (pci_device_id == ent->device_id) &&
> ((pci_subvendor_id == ent->subvendor_id) ||
> (ent->subvendor_id == 0)) &&
> ((pci_subdevice_id == ent->subdevice_id) ||
> (ent->subdevice_id == 0))) {
> sprintf(adapter_name, "%s, Version - %s",
> ixgbe_strings[ent->index],
> ixgbe_driver_version);
> device_set_desc_copy(dev, adapter_name);
> ++ixgbe_total_ports;
> return (BUS_PROBE_DEFAULT);
> }
> ent++;
> }
5410c4685,4686
< hw = &adapter->hw;
---
> return (ENXIO);
> } /* ixgbe_probe */
5412,5416d4687
< vf_index = IXGBE_VF_INDEX(vf->pool);
< vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
< vfte |= IXGBE_VF_BIT(vf->pool);
< IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
< }
5418,5420c4689,4697
<
< static void
< ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
---
> /************************************************************************
> * ixgbe_ioctl - Ioctl entry point
> *
> * Called when the user wants to configure the interface.
> *
> * return 0 on success, positive on failure
> ************************************************************************/
> static int
> ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
5422,5423c4699,4705
< struct ixgbe_hw *hw;
< uint32_t vf_index, vfre;
---
> struct adapter *adapter = ifp->if_softc;
> struct ifreq *ifr = (struct ifreq *) data;
> #if defined(INET) || defined(INET6)
> struct ifaddr *ifa = (struct ifaddr *)data;
> #endif
> int error = 0;
> bool avoid_reset = FALSE;
5425,5434c4707,4781
< hw = &adapter->hw;
<
< vf_index = IXGBE_VF_INDEX(vf->pool);
< vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
< if (ixgbe_vf_frame_size_compatible(adapter, vf))
< vfre |= IXGBE_VF_BIT(vf->pool);
< else
< vfre &= ~IXGBE_VF_BIT(vf->pool);
< IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
< }
---
> switch (command) {
> case SIOCSIFADDR:
> #ifdef INET
> if (ifa->ifa_addr->sa_family == AF_INET)
> avoid_reset = TRUE;
> #endif
> #ifdef INET6
> if (ifa->ifa_addr->sa_family == AF_INET6)
> avoid_reset = TRUE;
> #endif
> /*
> * Calling init results in link renegotiation,
> * so we avoid doing it when possible.
> */
> if (avoid_reset) {
> ifp->if_flags |= IFF_UP;
> if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
> ixgbe_init(adapter);
> #ifdef INET
> if (!(ifp->if_flags & IFF_NOARP))
> arp_ifinit(ifp, ifa);
> #endif
> } else
> error = ether_ioctl(ifp, command, data);
> break;
> case SIOCSIFMTU:
> IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
> if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
> error = EINVAL;
> } else {
> IXGBE_CORE_LOCK(adapter);
> ifp->if_mtu = ifr->ifr_mtu;
> adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
> if (ifp->if_drv_flags & IFF_DRV_RUNNING)
> ixgbe_init_locked(adapter);
> ixgbe_recalculate_max_frame(adapter);
> IXGBE_CORE_UNLOCK(adapter);
> }
> break;
> case SIOCSIFFLAGS:
> IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
> IXGBE_CORE_LOCK(adapter);
> if (ifp->if_flags & IFF_UP) {
> if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
> if ((ifp->if_flags ^ adapter->if_flags) &
> (IFF_PROMISC | IFF_ALLMULTI)) {
> ixgbe_set_promisc(adapter);
> }
> } else
> ixgbe_init_locked(adapter);
> } else
> if (ifp->if_drv_flags & IFF_DRV_RUNNING)
> ixgbe_stop(adapter);
> adapter->if_flags = ifp->if_flags;
> IXGBE_CORE_UNLOCK(adapter);
> break;
> case SIOCADDMULTI:
> case SIOCDELMULTI:
> IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
> if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
> IXGBE_CORE_LOCK(adapter);
> ixgbe_disable_intr(adapter);
> ixgbe_set_multi(adapter);
> ixgbe_enable_intr(adapter);
> IXGBE_CORE_UNLOCK(adapter);
> }
> break;
> case SIOCSIFMEDIA:
> case SIOCGIFMEDIA:
> IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
> error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
> break;
> case SIOCSIFCAP:
> {
> IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5435a4783
> int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5437,5442c4785,4786
< static void
< ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
< {
< struct ixgbe_hw *hw;
< uint32_t ack;
< uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
---
> if (!mask)
> break;
5444c4788,4808
< hw = &adapter->hw;
---
> /* HW cannot turn these on/off separately */
> if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
> ifp->if_capenable ^= IFCAP_RXCSUM;
> ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
> }
> if (mask & IFCAP_TXCSUM)
> ifp->if_capenable ^= IFCAP_TXCSUM;
> if (mask & IFCAP_TXCSUM_IPV6)
> ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
> if (mask & IFCAP_TSO4)
> ifp->if_capenable ^= IFCAP_TSO4;
> if (mask & IFCAP_TSO6)
> ifp->if_capenable ^= IFCAP_TSO6;
> if (mask & IFCAP_LRO)
> ifp->if_capenable ^= IFCAP_LRO;
> if (mask & IFCAP_VLAN_HWTAGGING)
> ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
> if (mask & IFCAP_VLAN_HWFILTER)
> ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
> if (mask & IFCAP_VLAN_HWTSO)
> ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5446c4810,4823
< ixgbe_process_vf_reset(adapter, vf);
---
> if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
> IXGBE_CORE_LOCK(adapter);
> ixgbe_init_locked(adapter);
> IXGBE_CORE_UNLOCK(adapter);
> }
> VLAN_CAPABILITIES(ifp);
> break;
> }
> #if __FreeBSD_version >= 1100036
> case SIOCGI2C:
> {
> struct ixgbe_hw *hw = &adapter->hw;
> struct ifi2creq i2c;
> int i;
5448,5453c4825,4836
< if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
< ixgbe_set_rar(&adapter->hw, vf->rar_index,
< vf->ether_addr, vf->pool, TRUE);
< ack = IXGBE_VT_MSGTYPE_ACK;
< } else
< ack = IXGBE_VT_MSGTYPE_NACK;
---
> IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
> error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
> if (error != 0)
> break;
> if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
> error = EINVAL;
> break;
> }
> if (i2c.len > sizeof(i2c.data)) {
> error = EINVAL;
> break;
> }
5455,5477c4838,4842
< ixgbe_vf_enable_transmit(adapter, vf);
< ixgbe_vf_enable_receive(adapter, vf);
<
< vf->flags |= IXGBE_VF_CTS;
<
< resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
< bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
< resp[3] = hw->mac.mc_filter_type;
< ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
< }
<
<
< static void
< ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
< {
< uint8_t *mac;
<
< mac = (uint8_t*)&msg[1];
<
< /* Check that the VF has permission to change the MAC address. */
< if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< return;
---
> for (i = 0; i < i2c.len; i++)
> hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
> i2c.dev_addr, &i2c.data[i]);
> error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
> break;
5479,5482c4844,4848
<
< if (ixgbe_validate_mac_addr(mac) != 0) {
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< return;
---
> #endif
> default:
> IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
> error = ether_ioctl(ifp, command, data);
> break;
5485c4851,4852
< bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
---
> return (error);
> } /* ixgbe_ioctl */
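The SIOCGI2C case above copies an ifi2creq in from userland, reads the requested bytes through hw->phy.ops.read_i2c_byte(), and copies the result back. A sketch of the matching userland call, assuming an SFP+ port attached as "ix0" and struct ifi2creq as declared in <net/if.h>; device address 0xA0 is the standard SFP identifier EEPROM, and the handler only accepts 0xA0/0xA2 with lengths up to sizeof(i2c.data).

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/if.h>

#include <err.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct ifi2creq i2c;
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	memset(&ifr, 0, sizeof(ifr));
	memset(&i2c, 0, sizeof(i2c));
	strlcpy(ifr.ifr_name, "ix0", sizeof(ifr.ifr_name));	/* assumed unit */
	i2c.dev_addr = 0xA0;	/* SFP identifier EEPROM */
	i2c.offset = 0;
	i2c.len = 8;		/* must fit in i2c.data */
	ifr.ifr_data = (caddr_t)&i2c;

	if (ioctl(s, SIOCGI2C, &ifr) == -1)
		err(1, "SIOCGI2C");
	for (int i = 0; i < i2c.len; i++)
		printf("%02x ", i2c.data[i]);
	printf("\n");
	return (0);
}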
5487,5497c4854,4856
< ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
< vf->pool, TRUE);
<
< ixgbe_send_vf_ack(adapter, vf, msg[0]);
< }
<
<
< /*
< ** VF multicast addresses are set by using the appropriate bit in
< ** 1 of 128 32 bit addresses (4096 possible).
< */
---
> /************************************************************************
> * ixgbe_check_fan_failure
> ************************************************************************/
5499c4858
< ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
---
> ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
5501,5503c4860
< u16 *list = (u16*)&msg[1];
< int entries;
< u32 vmolr, vec_bit, vec_reg, mta_reg;
---
> u32 mask;
5505,5506c4862,4863
< entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
< entries = min(entries, IXGBE_MAX_VF_MC);
---
> mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
> IXGBE_ESDP_SDP1;
5508c4865,4867
< vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
---
> if (reg & mask)
> device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
> } /* ixgbe_check_fan_failure */
5510,5528c4869,4871
< vf->num_mc_hashes = entries;
<
< /* Set the appropriate MTA bit */
< for (int i = 0; i < entries; i++) {
< vf->mc_hash[i] = list[i];
< vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
< vec_bit = vf->mc_hash[i] & 0x1F;
< mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
< mta_reg |= (1 << vec_bit);
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
< }
<
< vmolr |= IXGBE_VMOLR_ROMPE;
< IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
< ixgbe_send_vf_ack(adapter, vf, msg[0]);
< return;
< }
<
<
---
> /************************************************************************
> * ixgbe_handle_que
> ************************************************************************/
5530c4873
< ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
---
> ixgbe_handle_que(void *context, int pending)
5532,5534c4875,4878
< struct ixgbe_hw *hw;
< int enable;
< uint16_t tag;
---
> struct ix_queue *que = context;
> struct adapter *adapter = que->adapter;
> struct tx_ring *txr = que->txr;
> struct ifnet *ifp = adapter->ifp;
5536,5542c4880,4886
< hw = &adapter->hw;
< enable = IXGBE_VT_MSGINFO(msg[0]);
< tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
<
< if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< return;
---
> if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
> ixgbe_rxeof(que);
> IXGBE_TX_LOCK(txr);
> ixgbe_txeof(txr);
> if (!ixgbe_ring_empty(ifp, txr->br))
> ixgbe_start_locked(ifp, txr);
> IXGBE_TX_UNLOCK(txr);
5545,5553c4889,4893
< /* It is illegal to enable vlan tag 0. */
< if (tag == 0 && enable != 0){
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< return;
< }
<
< ixgbe_set_vfta(hw, tag, vf->pool, enable);
< ixgbe_send_vf_ack(adapter, vf, msg[0]);
< }
---
> /* Re-enable this interrupt */
> if (que->res != NULL)
> ixgbe_enable_queue(adapter, que->msix);
> else
> ixgbe_enable_intr(adapter);
5554a4895,4896
> return;
> } /* ixgbe_handle_que */
5556,5560d4897
< static void
< ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
< {
< struct ixgbe_hw *hw;
< uint32_t vf_max_size, pf_max_size, mhadd;
5562,5563d4898
< hw = &adapter->hw;
< vf_max_size = msg[1];
5565,5569c4900,4909
< if (vf_max_size < ETHER_CRC_LEN) {
< /* We intentionally ACK invalid LPE requests. */
< ixgbe_send_vf_ack(adapter, vf, msg[0]);
< return;
< }
---
> /************************************************************************
> * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
> ************************************************************************/
> static int
> ixgbe_allocate_legacy(struct adapter *adapter)
> {
> device_t dev = adapter->dev;
> struct ix_queue *que = adapter->queues;
> struct tx_ring *txr = adapter->tx_rings;
> int error;
5571,5576c4911,4917
< vf_max_size -= ETHER_CRC_LEN;
<
< if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
< /* We intentionally ACK invalid LPE requests. */
< ixgbe_send_vf_ack(adapter, vf, msg[0]);
< return;
---
> /* We allocate a single interrupt resource */
> adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
> &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
> if (adapter->res == NULL) {
> device_printf(dev,
> "Unable to allocate bus resource: interrupt\n");
> return (ENXIO);
5579,5581d4919
< vf->max_frame_size = vf_max_size;
< ixgbe_update_max_frame(adapter, vf->max_frame_size);
<
5583,5584c4921,4922
< * We might have to disable reception to this VF if the frame size is
< * not compatible with the config on the PF.
---
> * Try allocating a fast interrupt and the associated deferred
> * processing contexts.
5586c4924,4930
< ixgbe_vf_enable_receive(adapter, vf);
---
> if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
> TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
> TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
> que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
> taskqueue_thread_enqueue, &que->tq);
> taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
> device_get_nameunit(adapter->dev));
5588,5589c4932,4942
< mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
< pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
---
> /* Tasklets for Link, SFP and Multispeed Fiber */
> TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
> TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
> TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
> TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
> if (adapter->feat_en & IXGBE_FEATURE_FDIR)
> TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
> adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
> taskqueue_thread_enqueue, &adapter->tq);
> taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
> device_get_nameunit(adapter->dev));
5591,5594c4944,4954
< if (pf_max_size < adapter->max_frame_size) {
< mhadd &= ~IXGBE_MHADD_MFS_MASK;
< mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
< IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
---
> if ((error = bus_setup_intr(dev, adapter->res,
> INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
> &adapter->tag)) != 0) {
> device_printf(dev,
> "Failed to register fast interrupt handler: %d\n", error);
> taskqueue_free(que->tq);
> taskqueue_free(adapter->tq);
> que->tq = NULL;
> adapter->tq = NULL;
>
> return (error);
5595a4956,4957
> /* For simplicity in the handlers */
> adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
5597,5598c4959,4960
< ixgbe_send_vf_ack(adapter, vf, msg[0]);
< }
---
> return (0);
> } /* ixgbe_allocate_legacy */
5601,5603c4963,4967
< static void
< ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
< uint32_t *msg)
---
> /************************************************************************
> * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
> ************************************************************************/
> static int
> ixgbe_allocate_msix(struct adapter *adapter)
5605,5607c4969,4975
< //XXX implement this
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< }
---
> device_t dev = adapter->dev;
> struct ix_queue *que = adapter->queues;
> struct tx_ring *txr = adapter->tx_rings;
> int error, rid, vector = 0;
> int cpu_id = 0;
> unsigned int rss_buckets = 0;
> cpuset_t cpu_mask;
5609,5624c4977,4992
<
< static void
< ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
< uint32_t *msg)
< {
<
< switch (msg[1]) {
< case IXGBE_API_VER_1_0:
< case IXGBE_API_VER_1_1:
< vf->api_ver = msg[1];
< ixgbe_send_vf_ack(adapter, vf, msg[0]);
< break;
< default:
< vf->api_ver = IXGBE_API_VER_UNKNOWN;
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< break;
---
> /*
> * If we're doing RSS, the number of queues needs to
> * match the number of RSS buckets that are configured.
> *
> * + If there's more queues than RSS buckets, we'll end
> * up with queues that get no traffic.
> *
> * + If there's more RSS buckets than queues, we'll end
> * up having multiple RSS buckets map to the same queue,
> * so there'll be some contention.
> */
> rss_buckets = rss_getnumbuckets();
> if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
> (adapter->num_queues != rss_buckets)) {
> device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
> __func__, adapter->num_queues, rss_buckets);
5626d4993
< }
5627a4995,5017
> for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
> rid = vector + 1;
> que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
> RF_SHAREABLE | RF_ACTIVE);
> if (que->res == NULL) {
> device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
> vector);
> return (ENXIO);
> }
> /* Set the handler function */
> error = bus_setup_intr(dev, que->res,
> INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
> &que->tag);
> if (error) {
> que->res = NULL;
> device_printf(dev, "Failed to register QUE handler");
> return (error);
> }
> #if __FreeBSD_version >= 800504
> bus_describe_intr(dev, que->res, que->tag, "q%d", i);
> #endif
> que->msix = vector;
> adapter->active_queues |= (u64)(1 << que->msix);
5629,5635c5019,5047
< static void
< ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
< uint32_t *msg)
< {
< struct ixgbe_hw *hw;
< uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
< int num_queues;
---
> if (adapter->feat_en & IXGBE_FEATURE_RSS) {
> /*
> * The queue ID is used as the RSS layer bucket ID.
> * We look up the queue ID -> RSS CPU ID and select
> * that.
> */
> cpu_id = rss_getcpu(i % rss_buckets);
> CPU_SETOF(cpu_id, &cpu_mask);
> } else {
> /*
> * Bind the MSI-X vector, and thus the
> * rings to the corresponding CPU.
> *
> * This just happens to match the default RSS
> * round-robin bucket -> queue -> CPU allocation.
> */
> if (adapter->num_queues > 1)
> cpu_id = i;
> }
> if (adapter->num_queues > 1)
> bus_bind_intr(dev, que->res, cpu_id);
> #ifdef IXGBE_DEBUG
> if (adapter->feat_en & IXGBE_FEATURE_RSS)
> device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
> cpu_id);
> else
> device_printf(dev, "Bound queue %d to cpu %d\n", i,
> cpu_id);
> #endif /* IXGBE_DEBUG */
5637d5048
< hw = &adapter->hw;
5639,5644c5050,5068
< /* GET_QUEUES is not supported on pre-1.1 APIs. */
< switch (msg[0]) {
< case IXGBE_API_VER_1_0:
< case IXGBE_API_VER_UNKNOWN:
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< return;
---
> if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
> TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
> txr);
> TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
> que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
> taskqueue_thread_enqueue, &que->tq);
> #if __FreeBSD_version < 1100000
> taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
> device_get_nameunit(adapter->dev), i);
> #else
> if (adapter->feat_en & IXGBE_FEATURE_RSS)
> taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
> &cpu_mask, "%s (bucket %d)",
> device_get_nameunit(adapter->dev), cpu_id);
> else
> taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
> NULL, "%s:q%d", device_get_nameunit(adapter->dev),
> i);
> #endif
5647,5648c5071,5105
< resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
< IXGBE_VT_MSGTYPE_CTS;
---
> /* and Link */
> adapter->link_rid = vector + 1;
> adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
> &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
> if (!adapter->res) {
> device_printf(dev,
> "Unable to allocate bus resource: Link interrupt [%d]\n",
> adapter->link_rid);
> return (ENXIO);
> }
> /* Set the link handler function */
> error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
> NULL, ixgbe_msix_link, adapter, &adapter->tag);
> if (error) {
> adapter->res = NULL;
> device_printf(dev, "Failed to register LINK handler");
> return (error);
> }
> #if __FreeBSD_version >= 800504
> bus_describe_intr(dev, adapter->res, adapter->tag, "link");
> #endif
> adapter->vector = vector;
> /* Tasklets for Link, SFP and Multispeed Fiber */
> TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
> TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
> TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
> if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
> TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
> TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
> if (adapter->feat_en & IXGBE_FEATURE_FDIR)
> TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
> adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
> taskqueue_thread_enqueue, &adapter->tq);
> taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
> device_get_nameunit(adapter->dev));
5650,5654c5107,5108
< num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
< resp[IXGBE_VF_TX_QUEUES] = num_queues;
< resp[IXGBE_VF_RX_QUEUES] = num_queues;
< resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
< resp[IXGBE_VF_DEF_QUEUE] = 0;
---
> return (0);
> } /* ixgbe_allocate_msix */
5656,5661c5110,5117
< ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
< }
<
<
< static void
< ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
---
> /************************************************************************
> * ixgbe_configure_interrupts
> *
> * Setup MSI-X, MSI, or legacy interrupts (in that order).
> * This will also depend on user settings.
> ************************************************************************/
> static int
> ixgbe_configure_interrupts(struct adapter *adapter)
5663,5665c5119,5120
< struct ixgbe_hw *hw;
< uint32_t msg[IXGBE_VFMAILBOX_SIZE];
< int error;
---
> device_t dev = adapter->dev;
> int rid, want, queues, msgs;
5667c5122,5123
< hw = &adapter->hw;
---
> /* Default to 1 queue if MSI-X setup fails */
> adapter->num_queues = 1;
5669c5125,5127
< error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
---
> /* Override by tuneable */
> if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
> goto msi;
5671,5678c5129,5139
< if (error != 0)
< return;
<
< CTR3(KTR_MALLOC, "%s: received msg %x from %d",
< adapter->ifp->if_xname, msg[0], vf->pool);
< if (msg[0] == IXGBE_VF_RESET) {
< ixgbe_vf_reset_msg(adapter, vf, msg);
< return;
---
> /* First try MSI-X */
> msgs = pci_msix_count(dev);
> if (msgs == 0)
> goto msi;
> rid = PCIR_BAR(MSIX_82598_BAR);
> adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
> RF_ACTIVE);
> if (adapter->msix_mem == NULL) {
> rid += 4; /* 82599 maps in higher BAR */
> adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
> &rid, RF_ACTIVE);
5680,5683c5141,5144
<
< if (!(vf->flags & IXGBE_VF_CTS)) {
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
< return;
---
> if (adapter->msix_mem == NULL) {
> /* May not be enabled */
> device_printf(adapter->dev, "Unable to map MSI-X table.\n");
> goto msi;
5686,5709c5147,5154
< switch (msg[0] & IXGBE_VT_MSG_MASK) {
< case IXGBE_VF_SET_MAC_ADDR:
< ixgbe_vf_set_mac(adapter, vf, msg);
< break;
< case IXGBE_VF_SET_MULTICAST:
< ixgbe_vf_set_mc_addr(adapter, vf, msg);
< break;
< case IXGBE_VF_SET_VLAN:
< ixgbe_vf_set_vlan(adapter, vf, msg);
< break;
< case IXGBE_VF_SET_LPE:
< ixgbe_vf_set_lpe(adapter, vf, msg);
< break;
< case IXGBE_VF_SET_MACVLAN:
< ixgbe_vf_set_macvlan(adapter, vf, msg);
< break;
< case IXGBE_VF_API_NEGOTIATE:
< ixgbe_vf_api_negotiate(adapter, vf, msg);
< break;
< case IXGBE_VF_GET_QUEUES:
< ixgbe_vf_get_queues(adapter, vf, msg);
< break;
< default:
< ixgbe_send_vf_nack(adapter, vf, msg[0]);
---
> /* Figure out a reasonable auto config value */
> queues = min(mp_ncpus, msgs - 1);
> /* If we're doing RSS, clamp at the number of RSS buckets */
> if (adapter->feat_en & IXGBE_FEATURE_RSS)
> queues = min(queues, rss_getnumbuckets());
> if (ixgbe_num_queues > queues) {
> device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
> ixgbe_num_queues = queues;
5711d5155
< }
5712a5157,5161
> if (ixgbe_num_queues != 0)
> queues = ixgbe_num_queues;
> /* Set max queues to 8 when autoconfiguring */
> else
> queues = min(queues, 8);
5714,5723c5163,5164
< /*
< * Tasklet for handling VF -> PF mailbox messages.
< */
< static void
< ixgbe_handle_mbx(void *context, int pending)
< {
< struct adapter *adapter;
< struct ixgbe_hw *hw;
< struct ixgbe_vf *vf;
< int i;
---
> /* reflect correct sysctl value */
> ixgbe_num_queues = queues;
5725,5741c5166,5176
< adapter = context;
< hw = &adapter->hw;
<
< IXGBE_CORE_LOCK(adapter);
< for (i = 0; i < adapter->num_vfs; i++) {
< vf = &adapter->vfs[i];
<
< if (vf->flags & IXGBE_VF_ACTIVE) {
< if (ixgbe_check_for_rst(hw, vf->pool) == 0)
< ixgbe_process_vf_reset(adapter, vf);
<
< if (ixgbe_check_for_msg(hw, vf->pool) == 0)
< ixgbe_process_vf_msg(adapter, vf);
<
< if (ixgbe_check_for_ack(hw, vf->pool) == 0)
< ixgbe_process_vf_ack(adapter, vf);
< }
---
> /*
> * Want one vector (RX/TX pair) per queue
> * plus an additional for Link.
> */
> want = queues + 1;
> if (msgs >= want)
> msgs = want;
> else {
> device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
> msgs, want);
> goto msi;
5743,5744c5178,5190
< IXGBE_CORE_UNLOCK(adapter);
< }
---
> if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
> device_printf(adapter->dev,
> "Using MSI-X interrupts with %d vectors\n", msgs);
> adapter->num_queues = queues;
> adapter->feat_en |= IXGBE_FEATURE_MSIX;
> return (0);
> }
> /*
> * MSI-X allocation failed or provided us with
> * less vectors than needed. Free MSI-X resources
> * and we'll try enabling MSI.
> */
> pci_release_msi(dev);
5745a5192,5197
> msi:
> /* Without MSI-X, some features are no longer supported */
> adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
> adapter->feat_en &= ~IXGBE_FEATURE_RSS;
> adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
> adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5747,5759c5199,5202
< static int
< ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
< {
< struct adapter *adapter;
< enum ixgbe_iov_mode mode;
<
< adapter = device_get_softc(dev);
< adapter->num_vfs = num_vfs;
< mode = ixgbe_get_iov_mode(adapter);
<
< if (num_vfs > ixgbe_max_vfs(mode)) {
< adapter->num_vfs = 0;
< return (ENOSPC);
---
> if (adapter->msix_mem != NULL) {
> bus_release_resource(dev, SYS_RES_MEMORY, rid,
> adapter->msix_mem);
> adapter->msix_mem = NULL;
5760a5204,5210
> msgs = 1;
> if (pci_alloc_msi(dev, &msgs) == 0) {
> adapter->feat_en |= IXGBE_FEATURE_MSI;
> adapter->link_rid = 1;
> device_printf(adapter->dev, "Using an MSI interrupt\n");
> return (0);
> }
5762,5770c5212,5215
< IXGBE_CORE_LOCK(adapter);
<
< adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
< M_NOWAIT | M_ZERO);
<
< if (adapter->vfs == NULL) {
< adapter->num_vfs = 0;
< IXGBE_CORE_UNLOCK(adapter);
< return (ENOMEM);
---
> if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
> device_printf(adapter->dev,
> "Device does not support legacy interrupts.\n");
> return 1;
5773c5218,5220
< ixgbe_init_locked(adapter);
---
> adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
> adapter->link_rid = 0;
> device_printf(adapter->dev, "Using a Legacy interrupt\n");
5775,5776d5221
< IXGBE_CORE_UNLOCK(adapter);
<
5778c5223
< }
---
> } /* ixgbe_configure_interrupts */
5780a5226,5230
> /************************************************************************
> * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
> *
> * Done outside of interrupt context since the driver might sleep
> ************************************************************************/
5782c5232
< ixgbe_uninit_iov(device_t dev)
---
> ixgbe_handle_link(void *context, int pending)
5784,5786c5234,5235
< struct ixgbe_hw *hw;
< struct adapter *adapter;
< uint32_t pf_reg, vf_reg;
---
> struct adapter *adapter = context;
> struct ixgbe_hw *hw = &adapter->hw;
5788,5789c5237,5238
< adapter = device_get_softc(dev);
< hw = &adapter->hw;
---
> ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
> ixgbe_update_link_status(adapter);
5791c5240,5242
< IXGBE_CORE_LOCK(adapter);
---
> /* Re-enable link interrupts */
> IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
> } /* ixgbe_handle_link */
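The new header comment notes that ixgbe_handle_link() runs outside interrupt context because the work may sleep; the wiring that gets it there (a struct task enqueued from the MSI-X link vector) is not visible in these hunks. A small sketch of that deferral pattern, assuming the stock FreeBSD taskqueue(9) API and an invented nic_softc layout:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

struct nic_softc {
	struct task	link_task;	/* deferred link work */
	/* ... hardware handle, link state, etc. ... */
};

static void
nic_link_task(void *context, int pending)
{
	struct nic_softc *sc = context;

	(void)pending;
	/*
	 * Thread context: it is safe to sleep here while querying the PHY
	 * and updating the ifnet link state, e.g.
	 *   nic_check_link(sc); nic_update_link_status(sc);
	 * before unmasking the LSC interrupt cause again.
	 */
	(void)sc;
}

static void
nic_link_intr(struct nic_softc *sc)
{
	/* Called from the MSI-X link vector: defer the real work. */
	taskqueue_enqueue(taskqueue_thread, &sc->link_task);
}

static void
nic_link_task_init(struct nic_softc *sc)
{
	TASK_INIT(&sc->link_task, 0, nic_link_task, sc);
}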
5793,5816c5244,5246
< /* Enable rx/tx for the PF and disable it for all VFs. */
< pf_reg = IXGBE_VF_INDEX(adapter->pool);
< IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
< IXGBE_VF_BIT(adapter->pool));
< IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
< IXGBE_VF_BIT(adapter->pool));
<
< if (pf_reg == 0)
< vf_reg = 1;
< else
< vf_reg = 0;
< IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
< IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
<
< IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
<
< free(adapter->vfs, M_IXGBE);
< adapter->vfs = NULL;
< adapter->num_vfs = 0;
<
< IXGBE_CORE_UNLOCK(adapter);
< }
<
<
---
> /************************************************************************
> * ixgbe_rearm_queues
> ************************************************************************/
5818c5248
< ixgbe_initialize_iov(struct adapter *adapter)
---
> ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5820,5823c5250
< struct ixgbe_hw *hw = &adapter->hw;
< uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
< enum ixgbe_iov_mode mode;
< int i;
---
> u32 mask;
5825,5836c5252,5255
< mode = ixgbe_get_iov_mode(adapter);
< if (mode == IXGBE_NO_VM)
< return;
<
< IXGBE_CORE_LOCK_ASSERT(adapter);
<
< mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
< mrqc &= ~IXGBE_MRQC_MRQE_MASK;
<
< switch (mode) {
< case IXGBE_64_VM:
< mrqc |= IXGBE_MRQC_VMDQRSS64EN;
---
> switch (adapter->hw.mac.type) {
> case ixgbe_mac_82598EB:
> mask = (IXGBE_EIMS_RTX_QUEUE & queues);
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5838,5839c5257,5265
< case IXGBE_32_VM:
< mrqc |= IXGBE_MRQC_VMDQRSS32EN;
---
> case ixgbe_mac_82599EB:
> case ixgbe_mac_X540:
> case ixgbe_mac_X550:
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> mask = (queues & 0xFFFFFFFF);
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
> mask = (queues >> 32);
> IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
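The added ixgbe_rearm_queues() body writes the 64-bit queue bitmap 32 bits at a time on the newer MACs, so queue N lands in bit (N % 32) of EICS_EX(N / 32). A tiny, runnable illustration of that split; the write_eics_ex() helper is a stand-in for the driver's register-write macro, not part of the committed code.

#include <stdint.h>
#include <stdio.h>

static void
write_eics_ex(int index, uint32_t mask)
{
	/* Stand-in for IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(index), mask). */
	printf("EICS_EX(%d) <- 0x%08x\n", index, (unsigned)mask);
}

static void
rearm_queues64(uint64_t queues)
{
	/* Low 32 queue bits go to EICS_EX(0), high 32 bits to EICS_EX(1). */
	write_eics_ex(0, (uint32_t)(queues & 0xFFFFFFFF));
	write_eics_ex(1, (uint32_t)(queues >> 32));
}

int
main(void)
{
	/*
	 * Re-arm queues 0, 3 and 40: bits 0 and 3 land in EICS_EX(0)
	 * (0x00000009), bit 40 becomes bit 8 of EICS_EX(1) (0x00000100).
	 */
	rearm_queues64((1ULL << 0) | (1ULL << 3) | (1ULL << 40));
	return (0);
}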
5842,5849d5267
< panic("Unexpected SR-IOV mode %d", mode);
< }
< IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
<
< mtqc = IXGBE_MTQC_VT_ENA;
< switch (mode) {
< case IXGBE_64_VM:
< mtqc |= IXGBE_MTQC_64VF;
5851,5855d5268
< case IXGBE_32_VM:
< mtqc |= IXGBE_MTQC_32VF;
< break;
< default:
< panic("Unexpected SR-IOV mode %d", mode);
5857,5858c5270
< IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
<
---
> } /* ixgbe_rearm_queues */
5860,6002d5271
< gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
< gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
< gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
< switch (mode) {
< case IXGBE_64_VM:
< gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
< break;
< case IXGBE_32_VM:
< gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
< break;
< default:
< panic("Unexpected SR-IOV mode %d", mode);
< }
< IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
<
<
< gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
< gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
< switch (mode) {
< case IXGBE_64_VM:
< gpie |= IXGBE_GPIE_VTMODE_64;
< break;
< case IXGBE_32_VM:
< gpie |= IXGBE_GPIE_VTMODE_32;
< break;
< default:
< panic("Unexpected SR-IOV mode %d", mode);
< }
< IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
<
< /* Enable rx/tx for the PF. */
< vf_reg = IXGBE_VF_INDEX(adapter->pool);
< IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
< IXGBE_VF_BIT(adapter->pool));
< IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
< IXGBE_VF_BIT(adapter->pool));
<
< /* Allow VM-to-VM communication. */
< IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
<
< vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
< vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
< IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
<
< for (i = 0; i < adapter->num_vfs; i++)
< ixgbe_init_vf(adapter, &adapter->vfs[i]);
< }
<
<
< /*
< ** Check the max frame setting of all active VF's
< */
< static void
< ixgbe_recalculate_max_frame(struct adapter *adapter)
< {
< struct ixgbe_vf *vf;
<
< IXGBE_CORE_LOCK_ASSERT(adapter);
<
< for (int i = 0; i < adapter->num_vfs; i++) {
< vf = &adapter->vfs[i];
< if (vf->flags & IXGBE_VF_ACTIVE)
< ixgbe_update_max_frame(adapter, vf->max_frame_size);
< }
< }
<
<
< static void
< ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
< {
< struct ixgbe_hw *hw;
< uint32_t vf_index, pfmbimr;
<
< IXGBE_CORE_LOCK_ASSERT(adapter);
<
< hw = &adapter->hw;
<
< if (!(vf->flags & IXGBE_VF_ACTIVE))
< return;
<
< vf_index = IXGBE_VF_INDEX(vf->pool);
< pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
< pfmbimr |= IXGBE_VF_BIT(vf->pool);
< IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
<
< ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
<
< // XXX multicast addresses
<
< if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
< ixgbe_set_rar(&adapter->hw, vf->rar_index,
< vf->ether_addr, vf->pool, TRUE);
< }
<
< ixgbe_vf_enable_transmit(adapter, vf);
< ixgbe_vf_enable_receive(adapter, vf);
<
< ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
< }
<
< static int
< ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
< {
< struct adapter *adapter;
< struct ixgbe_vf *vf;
< const void *mac;
<
< adapter = device_get_softc(dev);
<
< KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
< vfnum, adapter->num_vfs));
<
< IXGBE_CORE_LOCK(adapter);
< vf = &adapter->vfs[vfnum];
< vf->pool= vfnum;
<
< /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
< vf->rar_index = vfnum + 1;
< vf->default_vlan = 0;
< vf->max_frame_size = ETHER_MAX_LEN;
< ixgbe_update_max_frame(adapter, vf->max_frame_size);
<
< if (nvlist_exists_binary(config, "mac-addr")) {
< mac = nvlist_get_binary(config, "mac-addr", NULL);
< bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
< if (nvlist_get_bool(config, "allow-set-mac"))
< vf->flags |= IXGBE_VF_CAP_MAC;
< } else
< /*
< * If the administrator has not specified a MAC address then
< * we must allow the VF to choose one.
< */
< vf->flags |= IXGBE_VF_CAP_MAC;
<
< vf->flags = IXGBE_VF_ACTIVE;
<
< ixgbe_init_vf(adapter, vf);
< IXGBE_CORE_UNLOCK(adapter);
<
< return (0);
< }
< #endif /* PCI_IOV */
<