1/****************************************************************************** 2 3 Copyright (c) 2001-2017, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/ixgbe/if_ix.c 353669 2019-10-16 21:54:48Z erj $*/

#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Driver version
 ************************************************************************/
char ixgbe_driver_version[] = "3.2.12-k";


/************************************************************************
 * PCI Device ID Table
 *
 * Used by probe to select devices to load on
 * Last field stores an index into ixgbe_strings
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 / X550EM family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass adapters */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of
 * branding strings
 ************************************************************************/
static char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
/* Device interface (newbus) entry points */
static int  ixgbe_probe(device_t);
static int  ixgbe_attach(device_t);
static int  ixgbe_detach(device_t);
static int  ixgbe_shutdown(device_t);
static int  ixgbe_suspend(device_t);
static int  ixgbe_resume(device_t);
/* ifnet entry points and init/stop paths */
static int  ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void ixgbe_init(void *);
static void ixgbe_init_locked(struct adapter *);
static void ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void ixgbe_init_device_features(struct adapter *);
static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void ixgbe_add_media_types(struct adapter *);
static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int  ixgbe_media_change(struct ifnet *);
/* PCI resource and interrupt setup */
static int  ixgbe_allocate_pci_resources(struct adapter *);
static void ixgbe_get_slot_info(struct adapter *);
static int  ixgbe_allocate_msix(struct adapter *);
static int  ixgbe_allocate_legacy(struct adapter *);
static int  ixgbe_configure_interrupts(struct adapter *);
static void ixgbe_free_pci_resources(struct adapter *);
static void ixgbe_local_timer(void *);
static int  ixgbe_setup_interface(device_t, struct adapter *);
static void ixgbe_config_gpie(struct adapter *);
static void ixgbe_config_dmac(struct adapter *);
static void ixgbe_config_delay_values(struct adapter *);
static void ixgbe_config_link(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *);
static int  ixgbe_setup_low_power_mode(struct adapter *);
static void ixgbe_rearm_queues(struct adapter *, u64);

/* TX/RX ring initialization */
static void ixgbe_initialize_transmit_units(struct adapter *);
static void ixgbe_initialize_receive_units(struct adapter *);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);
static void ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupt enable/disable, stats, filters, link */
static void ixgbe_enable_intr(struct adapter *, bool);
static void ixgbe_disable_intr(struct adapter *, bool);
static void ixgbe_update_stats_counters(struct adapter *);
static void ixgbe_set_promisc(struct adapter *);
static void ixgbe_set_multi(struct adapter *);
static void ixgbe_update_link_status(struct adapter *);
static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void ixgbe_configure_ivars(struct adapter *);
static u8   *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* VLAN event handling */
static void ixgbe_setup_vlan_hw_support(struct adapter *);
static void ixgbe_register_vlan(void *, struct ifnet *, u16);
static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void ixgbe_add_device_sysctls(struct adapter *);
static void ixgbe_add_hw_stats(struct adapter *);
static int  ixgbe_set_flowcntl(struct adapter *, int);
static int  ixgbe_set_advertise(struct adapter *, int);
static int  ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void ixgbe_set_sysctl_value(struct adapter *, const char *,
                                   const char *, int *, int);
static int  ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
#ifdef IXGBE_DEBUG
static int  ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
#endif
static int  ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int  ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static void ixgbe_msix_que(void *);
static void ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void ixgbe_handle_que(void *, int);
static void ixgbe_handle_link(void *);
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
static void ixgbe_handle_admin_task(void *, int);


/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	/* SR-IOV hooks, only when the kernel is built with PCI_IOV */
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci,
ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

/* TX start/drain entry points, selected at attach (legacy vs. multiqueue) */
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");

/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 * Program the RSS redirection table (RETA/ERETA), the RSS hash key
 * (RSSRK) and the hash field selection (MRQC) for this adapter.
 * With the kernel RSS feature enabled, the key and bucket mapping come
 * from the network stack; otherwise a random key and a simple
 * round-robin queue mapping are used.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/*
	 * Set multiplier for RETA setup and table size based on MAC.
	 * 82598 spaces its queue indices by 0x11; X550-class parts have a
	 * 512-entry table (the upper 384 entries live in ERETA).
	 */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the configured queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then write one 32-bit
		 * RETA/ERETA register every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32-bit = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the stack's hash types into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round a buffer size up to the next SRRCTL.BSIZEPKT granule */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring  *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	int             i, j;
	u32             bufsz, fctrl, srrctl, rxcsum;
	u32             hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in SRRCTL units (1KB granules) */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue descriptor ring setup */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet split types; only PSRTYPE(0) is used here */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Global TX DMA enable (82599 and later only) */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter once MTQC is programmed */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 * Called when the driver is being loaded.
 * Identifies the type of hardware, allocates all resources
 * and initializes the hardware.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_attach(device_t dev)
{
	struct adapter  *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u32             ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/*
	 * Make sure BUSMASTER is set
	 */
	pci_enable_busmaster(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw)) {
		device_printf(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Enable EEE power saving */
	if (adapter->feat_en & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw, TRUE);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: the ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN_TXD, MAX_TXD].
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	 * With many RX rings it is easy to exceed the
	 * system mbuf allocation. Tuning nmbclusters
	 * can alleviate this.
	 */
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Reset the hardware; tolerate a missing SFP module */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		adapter->sfp_probe = TRUE;
		error = IXGBE_SUCCESS;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Initialize Admin Task */
	TASK_INIT(&adapter->admin_task, 0, ixgbe_handle_admin_task, adapter);

	/* Initialize task queue */
	adapter->tq = taskqueue_create_fast("ixgbe_admin", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s admintaskq",
	    device_get_nameunit(adapter->dev));

	INIT_DEBUGOUT("ixgbe_attach: end");

	return (0);

	/* Error unwinding: release in reverse order of acquisition */
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	/* Clear the DRV_LOAD hint so firmware knows we are gone */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_IXGBE);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (error);
} /* ixgbe_attach */

/************************************************************************
 * ixgbe_check_wol_support
 *
 * Checks whether the adapter's ports are capable of
 * Wake On LAN by reading the adapter's NVM.
 *
 * Sets each port's hw->wol_enabled value depending
 * on the value read here.
 ************************************************************************/
static void
ixgbe_check_wol_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 dev_caps = 0;

	/* Find out WoL support for port */
	adapter->wol_support = hw->wol_enabled = 0;
	ixgbe_get_device_caps(hw, &dev_caps);
	/*
	 * WoL is granted when the NVM advertises it for both ports, or
	 * when it advertises it for port 0 only and this is PCI
	 * function 0.
	 */
	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
	     hw->bus.func == 0))
		adapter->wol_support = hw->wol_enabled = 1;

	/* Save initial wake up filter configuration */
	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);

	return;
} /* ixgbe_check_wol_support */

/************************************************************************
 * ixgbe_setup_interface
 *
 * Setup networking device structure and register an interface.
 *
 * Returns 0 on success, -1 if the ifnet could not be allocated.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Select the transmit path: legacy if_start with a software
	 * send queue, or the default multiqueue if_transmit path.
	 * The matching start/empty helpers are installed via the
	 * ixgbe_start_locked / ixgbe_ring_empty function pointers.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
		ifp->if_start = ixgbe_legacy_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
		ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
		IFQ_SET_READY(&ifp->if_snd);
		ixgbe_start_locked = ixgbe_legacy_start_locked;
		ixgbe_ring_empty = ixgbe_legacy_ring_empty;
	} else {
		ifp->if_transmit = ixgbe_mq_start;
		ifp->if_qflush = ixgbe_qflush;
		ixgbe_start_locked = ixgbe_mq_start_locked;
		ixgbe_ring_empty = drbr_empty;
	}

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                      | IFCAP_HWCSUM_IPV6
	                      | IFCAP_TSO
	                      | IFCAP_LRO
	                      | IFCAP_VLAN_HWTAGGING
	                      | IFCAP_VLAN_HWTSO
	                      | IFCAP_VLAN_HWCSUM
	                      | IFCAP_JUMBO_MTU
	                      | IFCAP_VLAN_MTU
	                      | IFCAP_HWSTATS;

	/* Enable the above capabilities by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */

#if __FreeBSD_version >= 1100036
/************************************************************************
 * ixgbe_get_counter
 *
 * Report interface statistics to the stack from the driver's
 * soft counters; any counter not handled here falls back to
 * the generic if_get_counter_default() implementation.
 ************************************************************************/
static uint64_t
ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter;
	struct tx_ring *txr;
	uint64_t rv;

	adapter = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		/* Full duplex hardware: collisions are impossible */
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_OQDROPS:
		/* Sum the buf_ring drop counts across all TX rings */
		rv = 0;
		txr = adapter->tx_rings;
		for (int i = 0; i < adapter->num_queues; i++, txr++)
			rv += txr->br->br_drops;
		return (rv);
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_get_counter */
#endif

/************************************************************************
 * ixgbe_add_media_types
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u64 layer;

	layer = adapter->phy_layer;

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	/* Both passive and active DA cables are reported as TWINAX */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		/* Multispeed fiber modules can also run at 1G */
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/*
	 * Without IFM_ETH_XTYPE there are no media words for the
	 * backplane (KR/KX) types, so map each one onto the closest
	 * legacy media type and report the mapping on the console.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */

/************************************************************************
 * ixgbe_is_sfp
 *
 * Returns TRUE when the MAC/PHY combination uses a pluggable
 * SFP/QSFP module, FALSE otherwise.
 ************************************************************************/
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 uses SFP only in combination with the NL PHY */
		if (hw->phy.type == ixgbe_phy_nl)
			return TRUE;
		return FALSE;
	case ixgbe_mac_82599EB:
		switch (hw->mac.ops.get_media_type(hw)) {
		case ixgbe_media_type_fiber:
		case ixgbe_media_type_fiber_qsfp:
			return TRUE;
		default:
			return FALSE;
		}
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
			return TRUE;
		return FALSE;
	default:
		return FALSE;
	}
} /* ixgbe_is_sfp */

/************************************************************************
 * ixgbe_config_link
 *
 * Kick off link configuration: SFP media is handed to the admin
 * taskqueue (module/multispeed-fiber tasks), other media is
 * negotiated and set up synchronously via the mac ops.
 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		if (hw->phy.multispeed_fiber) {
			hw->mac.ops.setup_sfp(hw);
			ixgbe_enable_tx_laser(hw);
			/* Defer multispeed-fiber setup to the admin task */
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
			taskqueue_enqueue(adapter->tq, &adapter->admin_task);
		} else {
			/* Defer module insertion handling to the admin task */
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
			taskqueue_enqueue(adapter->tq, &adapter->admin_task);
		}
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			goto out;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: fall back to the hardware's caps */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			goto out;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}
out:

	return;
} /* ixgbe_config_link */

/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
1292 ************************************************************************/ 1293static void 1294ixgbe_update_stats_counters(struct adapter *adapter) 1295{ 1296 struct ixgbe_hw *hw = &adapter->hw; 1297 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1298 u32 missed_rx = 0, bprc, lxon, lxoff, total; 1299 u64 total_missed_rx = 0; 1300 1301 stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 1302 stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 1303 stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 1304 stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 1305 stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0)); 1306 1307 for (int i = 0; i < 16; i++) { 1308 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 1309 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 1310 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1311 } 1312 stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 1313 stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 1314 stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 1315 1316 /* Hardware workaround, gprc counts missed packets */ 1317 stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 1318 stats->gprc -= missed_rx; 1319 1320 if (hw->mac.type != ixgbe_mac_82598EB) { 1321 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + 1322 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 1323 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1324 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 1325 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) + 1326 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 1327 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 1328 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 1329 } else { 1330 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 1331 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 1332 /* 82598 only has a counter in the high register */ 1333 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 1334 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 1335 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 1336 } 1337 1338 /* 
1339 * Workaround: mprc hardware is incorrectly counting 1340 * broadcasts, so for now we subtract those. 1341 */ 1342 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 1343 stats->bprc += bprc; 1344 stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 1345 if (hw->mac.type == ixgbe_mac_82598EB) 1346 stats->mprc -= bprc; 1347 1348 stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 1349 stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 1350 stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 1351 stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 1352 stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 1353 stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 1354 1355 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 1356 stats->lxontxc += lxon; 1357 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 1358 stats->lxofftxc += lxoff; 1359 total = lxon + lxoff; 1360 1361 stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 1362 stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 1363 stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 1364 stats->gptc -= total; 1365 stats->mptc -= total; 1366 stats->ptc64 -= total; 1367 stats->gotc -= total * ETHER_MIN_LEN; 1368 1369 stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 1370 stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 1371 stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 1372 stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 1373 stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 1374 stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 1375 stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 1376 stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 1377 stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 1378 stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 1379 stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 1380 stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 1381 stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 1382 stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 1383 stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 1384 stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 1385 
stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 1386 stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 1387 /* Only read FCOE on 82599 */ 1388 if (hw->mac.type != ixgbe_mac_82598EB) { 1389 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 1390 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 1391 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 1392 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 1393 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 1394 } 1395 1396 /* Fill out the OS statistics structure */ 1397 IXGBE_SET_IPACKETS(adapter, stats->gprc); 1398 IXGBE_SET_OPACKETS(adapter, stats->gptc); 1399 IXGBE_SET_IBYTES(adapter, stats->gorc); 1400 IXGBE_SET_OBYTES(adapter, stats->gotc); 1401 IXGBE_SET_IMCASTS(adapter, stats->mprc); 1402 IXGBE_SET_OMCASTS(adapter, stats->mptc); 1403 IXGBE_SET_COLLISIONS(adapter, 0); 1404 IXGBE_SET_IQDROPS(adapter, total_missed_rx); 1405 IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec); 1406} /* ixgbe_update_stats_counters */ 1407 1408/************************************************************************ 1409 * ixgbe_add_hw_stats 1410 * 1411 * Add sysctl variables, one per statistic, to the system. 
1412 ************************************************************************/ 1413static void 1414ixgbe_add_hw_stats(struct adapter *adapter) 1415{ 1416 device_t dev = adapter->dev; 1417 struct tx_ring *txr = adapter->tx_rings; 1418 struct rx_ring *rxr = adapter->rx_rings; 1419 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1420 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1421 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1422 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1423 struct sysctl_oid *stat_node, *queue_node; 1424 struct sysctl_oid_list *stat_list, *queue_list; 1425 1426#define QUEUE_NAME_LEN 32 1427 char namebuf[QUEUE_NAME_LEN]; 1428 1429 /* Driver Statistics */ 1430 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 1431 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); 1432 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed", 1433 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed"); 1434 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1435 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); 1436 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1437 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); 1438 1439 for (int i = 0; i < adapter->num_queues; i++, txr++) { 1440 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1441 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1442 CTLFLAG_RD, NULL, "Queue Name"); 1443 queue_list = SYSCTL_CHILDREN(queue_node); 1444 1445 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 1446 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i], 1447 sizeof(&adapter->queues[i]), 1448 ixgbe_sysctl_interrupt_rate_handler, "IU", 1449 "Interrupt Rate"); 1450 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1451 CTLFLAG_RD, &(adapter->queues[i].irqs), 1452 "irqs on this queue"); 1453 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 1454 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1455 ixgbe_sysctl_tdh_handler, 
"IU", "Transmit Descriptor Head"); 1456 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 1457 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), 1458 ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); 1459 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1460 CTLFLAG_RD, &txr->tso_tx, "TSO"); 1461 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup", 1462 CTLFLAG_RD, &txr->no_tx_dma_setup, 1463 "Driver tx dma failure in xmit"); 1464 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", 1465 CTLFLAG_RD, &txr->no_desc_avail, 1466 "Queue No Descriptor Available"); 1467 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1468 CTLFLAG_RD, &txr->total_packets, 1469 "Queue Packets Transmitted"); 1470 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops", 1471 CTLFLAG_RD, &txr->br->br_drops, 1472 "Packets dropped in buf_ring"); 1473 } 1474 1475 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1476 struct lro_ctrl *lro = &rxr->lro; 1477 1478 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1479 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1480 CTLFLAG_RD, NULL, "Queue Name"); 1481 queue_list = SYSCTL_CHILDREN(queue_node); 1482 1483 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 1484 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1485 ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); 1486 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 1487 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), 1488 ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); 1489 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1490 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); 1491 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1492 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); 1493 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", 1494 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); 1495 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1496 CTLFLAG_RD, 
&rxr->rx_discarded, "Discarded RX packets"); 1497 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued", 1498 CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued"); 1499 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed", 1500 CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed"); 1501 } 1502 1503 /* MAC stats get their own sub node */ 1504 1505 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 1506 CTLFLAG_RD, NULL, "MAC Statistics"); 1507 stat_list = SYSCTL_CHILDREN(stat_node); 1508 1509 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 1510 CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); 1511 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", 1512 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); 1513 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", 1514 CTLFLAG_RD, &stats->errbc, "Byte Errors"); 1515 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", 1516 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); 1517 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", 1518 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); 1519 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", 1520 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); 1521 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", 1522 CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); 1523 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", 1524 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); 1525 1526 /* Flow Control stats */ 1527 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 1528 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); 1529 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 1530 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); 1531 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 1532 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); 1533 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 1534 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); 1535 1536 /* Packet Reception Stats */ 
1537 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", 1538 CTLFLAG_RD, &stats->tor, "Total Octets Received"); 1539 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1540 CTLFLAG_RD, &stats->gorc, "Good Octets Received"); 1541 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", 1542 CTLFLAG_RD, &stats->tpr, "Total Packets Received"); 1543 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1544 CTLFLAG_RD, &stats->gprc, "Good Packets Received"); 1545 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1546 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); 1547 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", 1548 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); 1549 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 1550 CTLFLAG_RD, &stats->prc64, "64 byte frames received "); 1551 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 1552 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); 1553 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 1554 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); 1555 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 1556 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); 1557 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 1558 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); 1559 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 1560 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); 1561 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", 1562 CTLFLAG_RD, &stats->ruc, "Receive Undersized"); 1563 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 1564 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received "); 1565 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", 1566 CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); 1567 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, 
"recv_jabberd", 1568 CTLFLAG_RD, &stats->rjc, "Received Jabber"); 1569 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", 1570 CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); 1571 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", 1572 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped"); 1573 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", 1574 CTLFLAG_RD, &stats->xec, "Checksum Errors"); 1575 1576 /* Packet Transmission Stats */ 1577 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1578 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); 1579 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 1580 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); 1581 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1582 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); 1583 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 1584 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); 1585 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 1586 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); 1587 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", 1588 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); 1589 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 1590 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted "); 1591 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 1592 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); 1593 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 1594 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); 1595 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 1596 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); 1597 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 1598 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); 1599 SYSCTL_ADD_UQUAD(ctx, stat_list, 
OID_AUTO, "tx_frames_1024_1522", 1600 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); 1601} /* ixgbe_add_hw_stats */ 1602 1603/************************************************************************ 1604 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 1605 * 1606 * Retrieves the TDH value from the hardware 1607 ************************************************************************/ 1608static int 1609ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) 1610{ 1611 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1612 int error; 1613 unsigned int val; 1614 1615 if (!txr) 1616 return (0); 1617 1618 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); 1619 error = sysctl_handle_int(oidp, &val, 0, req); 1620 if (error || !req->newptr) 1621 return error; 1622 1623 return (0); 1624} /* ixgbe_sysctl_tdh_handler */ 1625 1626/************************************************************************ 1627 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1628 * 1629 * Retrieves the TDT value from the hardware 1630 ************************************************************************/ 1631static int 1632ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) 1633{ 1634 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); 1635 int error; 1636 unsigned int val; 1637 1638 if (!txr) 1639 return (0); 1640 1641 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); 1642 error = sysctl_handle_int(oidp, &val, 0, req); 1643 if (error || !req->newptr) 1644 return error; 1645 1646 return (0); 1647} /* ixgbe_sysctl_tdt_handler */ 1648 1649/************************************************************************ 1650 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 1651 * 1652 * Retrieves the RDH value from the hardware 1653 ************************************************************************/ 1654static int 1655ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) 1656{ 1657 struct rx_ring *rxr 
= ((struct rx_ring *)oidp->oid_arg1); 1658 int error; 1659 unsigned int val; 1660 1661 if (!rxr) 1662 return (0); 1663 1664 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); 1665 error = sysctl_handle_int(oidp, &val, 0, req); 1666 if (error || !req->newptr) 1667 return error; 1668 1669 return (0); 1670} /* ixgbe_sysctl_rdh_handler */ 1671 1672/************************************************************************ 1673 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 1674 * 1675 * Retrieves the RDT value from the hardware 1676 ************************************************************************/ 1677static int 1678ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) 1679{ 1680 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); 1681 int error; 1682 unsigned int val; 1683 1684 if (!rxr) 1685 return (0); 1686 1687 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); 1688 error = sysctl_handle_int(oidp, &val, 0, req); 1689 if (error || !req->newptr) 1690 return error; 1691 1692 return (0); 1693} /* ixgbe_sysctl_rdt_handler */ 1694 1695/************************************************************************ 1696 * ixgbe_register_vlan 1697 * 1698 * Run via vlan config EVENT, it enables us to use the 1699 * HW Filter table since we can get the vlan id. This 1700 * just creates the entry in the soft version of the 1701 * VFTA, init will repopulate the real table. 
 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* 4096 VLAN ids map onto 128 32-bit shadow VFTA words */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Push the updated shadow table into hardware */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */

/************************************************************************
 * ixgbe_unregister_vlan
 *
 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */

/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 * Program VLAN tag stripping on every RX queue and, when HW VLAN
 * filtering is enabled, repopulate the hardware VFTA from the
 * driver's shadow copy.
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	int i;
	u32 ctrl;


	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		rxr->vtag_strip = TRUE;
	}

	/* Without HW filtering there is nothing more to program */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* On 82598 VLAN stripping is a global setting in VLNCTRL */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
1812 ************************************************************************/ 1813static void 1814ixgbe_get_slot_info(struct adapter *adapter) 1815{ 1816 device_t dev = adapter->dev; 1817 struct ixgbe_hw *hw = &adapter->hw; 1818 u32 offset; 1819 u16 link; 1820 int bus_info_valid = TRUE; 1821 1822 /* Some devices are behind an internal bridge */ 1823 switch (hw->device_id) { 1824 case IXGBE_DEV_ID_82599_SFP_SF_QP: 1825 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 1826 goto get_parent_info; 1827 default: 1828 break; 1829 } 1830 1831 ixgbe_get_bus_info(hw); 1832 1833 /* 1834 * Some devices don't use PCI-E, but there is no need 1835 * to display "Unknown" for bus speed and width. 1836 */ 1837 switch (hw->mac.type) { 1838 case ixgbe_mac_X550EM_x: 1839 case ixgbe_mac_X550EM_a: 1840 return; 1841 default: 1842 goto display; 1843 } 1844 1845get_parent_info: 1846 /* 1847 * For the Quad port adapter we need to parse back 1848 * up the PCI tree to find the speed of the expansion 1849 * slot into which this adapter is plugged. A bit more work. 1850 */ 1851 dev = device_get_parent(device_get_parent(dev)); 1852#ifdef IXGBE_DEBUG 1853 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 1854 pci_get_slot(dev), pci_get_function(dev)); 1855#endif 1856 dev = device_get_parent(device_get_parent(dev)); 1857#ifdef IXGBE_DEBUG 1858 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 1859 pci_get_slot(dev), pci_get_function(dev)); 1860#endif 1861 /* Now get the PCI Express Capabilities offset */ 1862 if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { 1863 /* 1864 * Hmm...can't get PCI-Express capabilities. 1865 * Falling back to default method. 
1866 */ 1867 bus_info_valid = FALSE; 1868 ixgbe_get_bus_info(hw); 1869 goto display; 1870 } 1871 /* ...and read the Link Status Register */ 1872 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 1873 ixgbe_set_pci_config_data_generic(hw, link); 1874 1875display: 1876 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 1877 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 1878 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 1879 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 1880 "Unknown"), 1881 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 1882 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 1883 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 1884 "Unknown")); 1885 1886 if (bus_info_valid) { 1887 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 1888 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 1889 (hw->bus.speed == ixgbe_bus_speed_2500))) { 1890 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1891 device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); 1892 } 1893 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 1894 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 1895 (hw->bus.speed < ixgbe_bus_speed_8000))) { 1896 device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); 1897 device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); 1898 } 1899 } else 1900 device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); 1901 1902 return; 1903} /* ixgbe_get_slot_info */ 1904 1905/************************************************************************ 1906 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets 1907 ************************************************************************/ 1908static inline void 1909ixgbe_enable_queue(struct adapter *adapter, u32 vector) 1910{ 1911 struct ixgbe_hw *hw = &adapter->hw; 1912 u64 queue = (u64)(1 << vector); 1913 u32 mask; 1914 1915 if (hw->mac.type == ixgbe_mac_82598EB) { 1916 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 1917 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 1918 } else { 1919 mask = (queue & 0xFFFFFFFF); 1920 if (mask) 1921 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 1922 mask = (queue >> 32); 1923 if (mask) 1924 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 1925 } 1926} /* ixgbe_enable_queue */ 1927 1928/************************************************************************ 1929 * ixgbe_disable_queue 1930 ************************************************************************/ 1931static inline void 1932ixgbe_disable_queue(struct adapter *adapter, u32 vector) 1933{ 1934 struct ixgbe_hw *hw = &adapter->hw; 1935 u64 queue = (u64)(1 << vector); 1936 u32 mask; 1937 1938 if (hw->mac.type == ixgbe_mac_82598EB) { 1939 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 1940 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 1941 } else { 1942 mask = (queue & 0xFFFFFFFF); 1943 if (mask) 1944 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 1945 mask = (queue >> 32); 1946 if (mask) 1947 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 1948 } 1949} /* ixgbe_disable_queue */ 1950 1951/************************************************************************ 1952 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 1953 ************************************************************************/ 1954void 1955ixgbe_msix_que(void *arg) 1956{ 1957 struct ix_queue *que = arg; 1958 struct adapter *adapter = 
que->adapter; 1959 struct ifnet *ifp = adapter->ifp; 1960 struct tx_ring *txr = que->txr; 1961 struct rx_ring *rxr = que->rxr; 1962 bool more; 1963 u32 newitr = 0; 1964 1965 1966 /* Protect against spurious interrupts */ 1967 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1968 return; 1969 1970 ixgbe_disable_queue(adapter, que->msix); 1971 ++que->irqs; 1972 1973 more = ixgbe_rxeof(que); 1974 1975 IXGBE_TX_LOCK(txr); 1976 ixgbe_txeof(txr); 1977 if (!ixgbe_ring_empty(ifp, txr->br)) 1978 ixgbe_start_locked(ifp, txr); 1979 IXGBE_TX_UNLOCK(txr); 1980 1981 /* Do AIM now? */ 1982 1983 if (adapter->enable_aim == FALSE) 1984 goto no_calc; 1985 /* 1986 * Do Adaptive Interrupt Moderation: 1987 * - Write out last calculated setting 1988 * - Calculate based on average size over 1989 * the last interval. 1990 */ 1991 if (que->eitr_setting) 1992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), 1993 que->eitr_setting); 1994 1995 que->eitr_setting = 0; 1996 1997 /* Idle, do nothing */ 1998 if ((txr->bytes == 0) && (rxr->bytes == 0)) 1999 goto no_calc; 2000 2001 if ((txr->bytes) && (txr->packets)) 2002 newitr = txr->bytes/txr->packets; 2003 if ((rxr->bytes) && (rxr->packets)) 2004 newitr = max(newitr, (rxr->bytes / rxr->packets)); 2005 newitr += 24; /* account for hardware frame, crc */ 2006 2007 /* set an upper boundary */ 2008 newitr = min(newitr, 3000); 2009 2010 /* Be nice to the mid range */ 2011 if ((newitr > 300) && (newitr < 1200)) 2012 newitr = (newitr / 3); 2013 else 2014 newitr = (newitr / 2); 2015 2016 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2017 newitr |= newitr << 16; 2018 else 2019 newitr |= IXGBE_EITR_CNT_WDIS; 2020 2021 /* save for next interrupt */ 2022 que->eitr_setting = newitr; 2023 2024 /* Reset state */ 2025 txr->bytes = 0; 2026 txr->packets = 0; 2027 rxr->bytes = 0; 2028 rxr->packets = 0; 2029 2030no_calc: 2031 if (more) 2032 taskqueue_enqueue(que->tq, &que->que_task); 2033 else 2034 ixgbe_enable_queue(adapter, que->msix); 2035 2036 return; 2037} 
/* ixgbe_msix_que */ 2038 2039/************************************************************************ 2040 * ixgbe_media_status - Media Ioctl callback 2041 * 2042 * Called whenever the user queries the status of 2043 * the interface using ifconfig. 2044 ************************************************************************/ 2045static void 2046ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2047{ 2048 struct adapter *adapter = ifp->if_softc; 2049 struct ixgbe_hw *hw = &adapter->hw; 2050 int layer; 2051 2052 INIT_DEBUGOUT("ixgbe_media_status: begin"); 2053 IXGBE_CORE_LOCK(adapter); 2054 ixgbe_update_link_status(adapter); 2055 2056 ifmr->ifm_status = IFM_AVALID; 2057 ifmr->ifm_active = IFM_ETHER; 2058 2059 if (!adapter->link_active) { 2060 IXGBE_CORE_UNLOCK(adapter); 2061 return; 2062 } 2063 2064 ifmr->ifm_status |= IFM_ACTIVE; 2065 layer = adapter->phy_layer; 2066 2067 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2068 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2069 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2070 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2071 switch (adapter->link_speed) { 2072 case IXGBE_LINK_SPEED_10GB_FULL: 2073 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2074 break; 2075 case IXGBE_LINK_SPEED_1GB_FULL: 2076 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2077 break; 2078 case IXGBE_LINK_SPEED_100_FULL: 2079 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2080 break; 2081 case IXGBE_LINK_SPEED_10_FULL: 2082 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2083 break; 2084 } 2085 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2086 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2087 switch (adapter->link_speed) { 2088 case IXGBE_LINK_SPEED_10GB_FULL: 2089 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2090 break; 2091 } 2092 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2093 switch (adapter->link_speed) { 2094 case IXGBE_LINK_SPEED_10GB_FULL: 2095 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2096 break; 2097 case IXGBE_LINK_SPEED_1GB_FULL: 2098 ifmr->ifm_active 
|= IFM_1000_LX | IFM_FDX; 2099 break; 2100 } 2101 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2102 switch (adapter->link_speed) { 2103 case IXGBE_LINK_SPEED_10GB_FULL: 2104 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2105 break; 2106 case IXGBE_LINK_SPEED_1GB_FULL: 2107 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2108 break; 2109 } 2110 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2111 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2112 switch (adapter->link_speed) { 2113 case IXGBE_LINK_SPEED_10GB_FULL: 2114 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2115 break; 2116 case IXGBE_LINK_SPEED_1GB_FULL: 2117 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2118 break; 2119 } 2120 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2121 switch (adapter->link_speed) { 2122 case IXGBE_LINK_SPEED_10GB_FULL: 2123 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2124 break; 2125 } 2126 /* 2127 * XXX: These need to use the proper media types once 2128 * they're added. 2129 */ 2130#ifndef IFM_ETH_XTYPE 2131 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2132 switch (adapter->link_speed) { 2133 case IXGBE_LINK_SPEED_10GB_FULL: 2134 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2135 break; 2136 case IXGBE_LINK_SPEED_2_5GB_FULL: 2137 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2138 break; 2139 case IXGBE_LINK_SPEED_1GB_FULL: 2140 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2141 break; 2142 } 2143 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2144 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2145 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2146 switch (adapter->link_speed) { 2147 case IXGBE_LINK_SPEED_10GB_FULL: 2148 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2149 break; 2150 case IXGBE_LINK_SPEED_2_5GB_FULL: 2151 ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; 2152 break; 2153 case IXGBE_LINK_SPEED_1GB_FULL: 2154 ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; 2155 break; 2156 } 2157#else 2158 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2159 switch (adapter->link_speed) { 2160 case IXGBE_LINK_SPEED_10GB_FULL: 2161 
ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2162 break; 2163 case IXGBE_LINK_SPEED_2_5GB_FULL: 2164 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2165 break; 2166 case IXGBE_LINK_SPEED_1GB_FULL: 2167 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2168 break; 2169 } 2170 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2171 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2172 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2173 switch (adapter->link_speed) { 2174 case IXGBE_LINK_SPEED_10GB_FULL: 2175 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2176 break; 2177 case IXGBE_LINK_SPEED_2_5GB_FULL: 2178 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2179 break; 2180 case IXGBE_LINK_SPEED_1GB_FULL: 2181 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2182 break; 2183 } 2184#endif 2185 2186 /* If nothing is recognized... */ 2187 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2188 ifmr->ifm_active |= IFM_UNKNOWN; 2189 2190#if __FreeBSD_version >= 900025 2191 /* Display current flow control setting used on link */ 2192 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2193 hw->fc.current_mode == ixgbe_fc_full) 2194 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2195 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2196 hw->fc.current_mode == ixgbe_fc_full) 2197 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2198#endif 2199 2200 IXGBE_CORE_UNLOCK(adapter); 2201 2202 return; 2203} /* ixgbe_media_status */ 2204 2205/************************************************************************ 2206 * ixgbe_media_change - Media Ioctl callback 2207 * 2208 * Called when the user changes speed/duplex using 2209 * media/mediopt option with ifconfig. 
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be changed from software */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_10G_LRM:
	case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
#ifndef IFM_ETH_XTYPE
	case IFM_1000_CX: /* KX */
#else
	case IFM_1000_KX:
#endif
	case IFM_1000_LX:
	case IFM_1000_SX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record advertised speeds in the sysctl encoding:
	 * 1 = 100M, 2 = 1G, 4 = 10G, 8 = 10M (bitmask).
	 */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */

/************************************************************************
 * ixgbe_set_promisc
 *
 *   Program FCTRL unicast/multicast promiscuous bits from the
 *   interface flags and the current multicast address count.
 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count multicast addresses, capped at the HW filter limit */
		struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* All addresses fit in the filter: no multicast promiscuous needed */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */

/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 ************************************************************************/
static void
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/*
	 * Link status change: mask LSC and hand off to the admin task.
	 * NOTE(review): OTHER interrupts are left masked on return from
	 * this ISR; presumably the admin task re-enables them — confirm.
	 */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		adapter->task_requests |= IXGBE_REQUEST_TASK_LINK;
		taskqueue_enqueue(adapter->tq, &adapter->admin_task);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow Director table full / reinit request */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
			taskqueue_enqueue(adapter->tq, &adapter->admin_task);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX)) {
			adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
			taskqueue_enqueue(adapter->tq, &adapter->admin_task);
		}
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
			taskqueue_enqueue(adapter->tq, &adapter->admin_task);
		}

		/* 82599: multi-speed fiber setup request */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
			taskqueue_enqueue(adapter->tq, &adapter->admin_task);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
		taskqueue_enqueue(adapter->tq, &adapter->admin_task);
	}
} /* ixgbe_msix_link */

/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Report and optionally set a queue's interrupt rate (irqs/sec)
 *   by translating to/from the hardware EITR interval field.
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
	int error;
	unsigned int reg, usec, rate;

	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
	/* Bits 3..11 hold the interval; convert to an approximate rate */
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || !req->newptr)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	ixgbe_max_interrupt_rate = 0;
	if (rate > 0 && rate < 500000) {
		/* Clamp to a sane minimum before converting back */
		if (rate < 1000)
			rate = 1000;
		ixgbe_max_interrupt_rate = rate;
		reg |= ((4000000/rate) & 0xff8);
	}
	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */

/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Register the per-device sysctl nodes; feature-specific nodes
 *   are added only when the hardware supports them.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

	adapter->enable_aim = ixgbe_enable_aim;
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &adapter->enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
		    CTLFLAG_RD, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */

/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Map BAR 0 and record the bus-space handles the register
 *   access macros and shared code use.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Save bus_space values for READ/WRITE_REG macros */
	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	/* Set hw values for shared code */
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	return (0);
} /* ixgbe_allocate_pci_resources */

/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
			if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
				taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	callout_drain(&adapter->timer);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	/* Drain the Admin Task queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->admin_task);
		taskqueue_free(adapter->tq);
	}

	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_IXGBE);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */

/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 *   Prepare the adapter/port for LPLU and/or WoL
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Caller must hold the core lock */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	return error;
} /* ixgbe_setup_low_power_mode */

/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 ************************************************************************/
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */

/************************************************************************
 * ixgbe_suspend
 *
 *   From D0 to D3
 ************************************************************************/
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_suspend */

/************************************************************************
2776 * ixgbe_resume 2777 * 2778 * From D3 to D0 2779 ************************************************************************/ 2780static int 2781ixgbe_resume(device_t dev) 2782{ 2783 struct adapter *adapter = device_get_softc(dev); 2784 struct ifnet *ifp = adapter->ifp; 2785 struct ixgbe_hw *hw = &adapter->hw; 2786 u32 wus; 2787 2788 INIT_DEBUGOUT("ixgbe_resume: begin"); 2789 2790 IXGBE_CORE_LOCK(adapter); 2791 2792 /* Read & clear WUS register */ 2793 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 2794 if (wus) 2795 device_printf(dev, "Woken up by (WUS): %#010x\n", 2796 IXGBE_READ_REG(hw, IXGBE_WUS)); 2797 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 2798 /* And clear WUFC until next low-power transition */ 2799 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 2800 2801 /* 2802 * Required after D3->D0 transition; 2803 * will re-advertise all previous advertised speeds 2804 */ 2805 if (ifp->if_flags & IFF_UP) 2806 ixgbe_init_locked(adapter); 2807 2808 IXGBE_CORE_UNLOCK(adapter); 2809 2810 return (0); 2811} /* ixgbe_resume */ 2812 2813/************************************************************************ 2814 * ixgbe_set_if_hwassist - Set the various hardware offload abilities. 2815 * 2816 * Takes the ifnet's if_capenable flags (e.g. set by the user using 2817 * ifconfig) and indicates to the OS via the ifnet's if_hwassist 2818 * field what mbuf offload flags the driver will understand. 
2819 ************************************************************************/ 2820static void 2821ixgbe_set_if_hwassist(struct adapter *adapter) 2822{ 2823 struct ifnet *ifp = adapter->ifp; 2824 2825 ifp->if_hwassist = 0; 2826#if __FreeBSD_version >= 1000000 2827 if (ifp->if_capenable & IFCAP_TSO4) 2828 ifp->if_hwassist |= CSUM_IP_TSO; 2829 if (ifp->if_capenable & IFCAP_TSO6) 2830 ifp->if_hwassist |= CSUM_IP6_TSO; 2831 if (ifp->if_capenable & IFCAP_TXCSUM) { 2832 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP); 2833 if (adapter->hw.mac.type != ixgbe_mac_82598EB) 2834 ifp->if_hwassist |= CSUM_IP_SCTP; 2835 } 2836 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) { 2837 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP); 2838 if (adapter->hw.mac.type != ixgbe_mac_82598EB) 2839 ifp->if_hwassist |= CSUM_IP6_SCTP; 2840 } 2841#else 2842 if (ifp->if_capenable & IFCAP_TSO) 2843 ifp->if_hwassist |= CSUM_TSO; 2844 if (ifp->if_capenable & IFCAP_TXCSUM) { 2845 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 2846 if (adapter->hw.mac.type != ixgbe_mac_82598EB) 2847 ifp->if_hwassist |= CSUM_SCTP; 2848 } 2849#endif 2850} /* ixgbe_set_if_hwassist */ 2851 2852/************************************************************************ 2853 * ixgbe_init_locked - Init entry point 2854 * 2855 * Used in two ways: It is used by the stack as an init 2856 * entry point in network interface structure. It is also 2857 * used by the driver as a hw/sw initialization routine to 2858 * get to a consistent state. 
2859 * 2860 * return 0 on success, positive on failure 2861 ************************************************************************/ 2862void 2863ixgbe_init_locked(struct adapter *adapter) 2864{ 2865 struct ifnet *ifp = adapter->ifp; 2866 device_t dev = adapter->dev; 2867 struct ixgbe_hw *hw = &adapter->hw; 2868 struct tx_ring *txr; 2869 struct rx_ring *rxr; 2870 u32 txdctl, mhadd; 2871 u32 rxdctl, rxctrl; 2872 u32 ctrl_ext; 2873 int err = 0; 2874 2875 mtx_assert(&adapter->core_mtx, MA_OWNED); 2876 INIT_DEBUGOUT("ixgbe_init_locked: begin"); 2877 2878 hw->adapter_stopped = FALSE; 2879 ixgbe_stop_adapter(hw); 2880 callout_stop(&adapter->timer); 2881 2882 /* Queue indices may change with IOV mode */ 2883 ixgbe_align_all_queue_indices(adapter); 2884 2885 /* reprogram the RAR[0] in case user changed it. */ 2886 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 2887 2888 /* Get the latest mac address, User can use a LAA */ 2889 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 2890 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 2891 hw->addr_ctrl.rar_used_count = 1; 2892 2893 /* Set hardware offload abilities from ifnet flags */ 2894 ixgbe_set_if_hwassist(adapter); 2895 2896 /* Prepare transmit descriptors and buffers */ 2897 if (ixgbe_setup_transmit_structures(adapter)) { 2898 device_printf(dev, "Could not setup transmit structures\n"); 2899 ixgbe_stop(adapter); 2900 return; 2901 } 2902 2903 ixgbe_init_hw(hw); 2904 ixgbe_initialize_iov(adapter); 2905 ixgbe_initialize_transmit_units(adapter); 2906 2907 /* Setup Multicast table */ 2908 ixgbe_set_multi(adapter); 2909 2910 /* Determine the correct mbuf pool, based on frame size */ 2911 if (adapter->max_frame_size <= MCLBYTES) 2912 adapter->rx_mbuf_sz = MCLBYTES; 2913 else 2914 adapter->rx_mbuf_sz = MJUMPAGESIZE; 2915 2916 /* Prepare receive descriptors and buffers */ 2917 if (ixgbe_setup_receive_structures(adapter)) { 2918 device_printf(dev, "Could not setup receive structures\n"); 
2919 ixgbe_stop(adapter); 2920 return; 2921 } 2922 2923 /* Configure RX settings */ 2924 ixgbe_initialize_receive_units(adapter); 2925 2926 /* Initialize variable holding task enqueue requests 2927 * generated by interrupt handlers */ 2928 adapter->task_requests = 0; 2929 2930 /* Enable SDP & MSI-X interrupts based on adapter */ 2931 ixgbe_config_gpie(adapter); 2932 2933 /* Set MTU size */ 2934 if (ifp->if_mtu > ETHERMTU) { 2935 /* aka IXGBE_MAXFRS on 82599 and newer */ 2936 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2937 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2938 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 2939 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 2940 } 2941 2942 /* Now enable all the queues */ 2943 for (int i = 0; i < adapter->num_queues; i++) { 2944 txr = &adapter->tx_rings[i]; 2945 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 2946 txdctl |= IXGBE_TXDCTL_ENABLE; 2947 /* Set WTHRESH to 8, burst writeback */ 2948 txdctl |= (8 << 16); 2949 /* 2950 * When the internal queue falls below PTHRESH (32), 2951 * start prefetching as long as there are at least 2952 * HTHRESH (1) buffers ready. The values are taken 2953 * from the Intel linux driver 3.8.21. 2954 * Prefetching enables tx line rate even with 1 queue. 
2955 */ 2956 txdctl |= (32 << 0) | (1 << 8); 2957 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 2958 } 2959 2960 for (int i = 0, j = 0; i < adapter->num_queues; i++) { 2961 rxr = &adapter->rx_rings[i]; 2962 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2963 if (hw->mac.type == ixgbe_mac_82598EB) { 2964 /* 2965 * PTHRESH = 21 2966 * HTHRESH = 4 2967 * WTHRESH = 8 2968 */ 2969 rxdctl &= ~0x3FFFFF; 2970 rxdctl |= 0x080420; 2971 } 2972 rxdctl |= IXGBE_RXDCTL_ENABLE; 2973 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 2974 for (; j < 10; j++) { 2975 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 2976 IXGBE_RXDCTL_ENABLE) 2977 break; 2978 else 2979 msec_delay(1); 2980 } 2981 wmb(); 2982 2983 /* 2984 * In netmap mode, we must preserve the buffers made 2985 * available to userspace before the if_init() 2986 * (this is true by default on the TX side, because 2987 * init makes all buffers available to userspace). 2988 * 2989 * netmap_reset() and the device specific routines 2990 * (e.g. ixgbe_setup_receive_rings()) map these 2991 * buffers at the end of the NIC ring, so here we 2992 * must set the RDT (tail) register to make sure 2993 * they are not overwritten. 2994 * 2995 * In this driver the NIC ring starts at RDH = 0, 2996 * RDT points to the last slot available for reception (?), 2997 * so RDT = num_rx_desc - 1 means the whole ring is available. 
2998 */ 2999#ifdef DEV_NETMAP 3000 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 3001 (ifp->if_capenable & IFCAP_NETMAP)) { 3002 struct netmap_adapter *na = NA(adapter->ifp); 3003 struct netmap_kring *kring = na->rx_rings[i]; 3004 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 3005 3006 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t); 3007 } else 3008#endif /* DEV_NETMAP */ 3009 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), 3010 adapter->num_rx_desc - 1); 3011 } 3012 3013 /* Enable Receive engine */ 3014 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3015 if (hw->mac.type == ixgbe_mac_82598EB) 3016 rxctrl |= IXGBE_RXCTRL_DMBYPS; 3017 rxctrl |= IXGBE_RXCTRL_RXEN; 3018 ixgbe_enable_rx_dma(hw, rxctrl); 3019 3020 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 3021 3022 /* Set up MSI-X routing */ 3023 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 3024 ixgbe_configure_ivars(adapter); 3025 /* Set up auto-mask */ 3026 if (hw->mac.type == ixgbe_mac_82598EB) 3027 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3028 else { 3029 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3030 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3031 } 3032 } else { /* Simple settings for Legacy/MSI */ 3033 ixgbe_set_ivar(adapter, 0, 0, 0); 3034 ixgbe_set_ivar(adapter, 0, 0, 1); 3035 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3036 } 3037 3038 ixgbe_init_fdir(adapter); 3039 3040 /* 3041 * Check on any SFP devices that 3042 * need to be kick-started 3043 */ 3044 if (hw->phy.type == ixgbe_phy_none) { 3045 err = hw->phy.ops.identify(hw); 3046 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3047 device_printf(dev, 3048 "Unsupported SFP+ module type was detected.\n"); 3049 return; 3050 } 3051 } 3052 3053 /* Set moderation on the Link interrupt */ 3054 IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR); 3055 3056 /* Config/Enable Link */ 3057 ixgbe_config_link(adapter); 3058 3059 /* Hardware Packet Buffer & Flow Control setup */ 3060 
ixgbe_config_delay_values(adapter); 3061 3062 /* Initialize the FC settings */ 3063 ixgbe_start_hw(hw); 3064 3065 /* Set up VLAN support and filter */ 3066 ixgbe_setup_vlan_hw_support(adapter); 3067 3068 /* Setup DMA Coalescing */ 3069 ixgbe_config_dmac(adapter); 3070 3071 /* And now turn on interrupts */ 3072 ixgbe_enable_intr(adapter, false); 3073 3074 /* Enable the use of the MBX by the VF's */ 3075 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 3076 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3077 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 3078 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3079 } 3080 3081 /* Now inform the stack we're ready */ 3082 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3083 3084 return; 3085} /* ixgbe_init_locked */ 3086 3087/************************************************************************ 3088 * ixgbe_init 3089 ************************************************************************/ 3090static void 3091ixgbe_init(void *arg) 3092{ 3093 struct adapter *adapter = arg; 3094 3095 IXGBE_CORE_LOCK(adapter); 3096 ixgbe_init_locked(adapter); 3097 IXGBE_CORE_UNLOCK(adapter); 3098 3099 return; 3100} /* ixgbe_init */ 3101 3102/************************************************************************ 3103 * ixgbe_set_ivar 3104 * 3105 * Setup the correct IVAR register for a particular MSI-X interrupt 3106 * (yes this is all very magic and confusing :) 3107 * - entry is the register array entry 3108 * - vector is the MSI-X vector for this queue 3109 * - type is RX/TX/MISC 3110 ************************************************************************/ 3111static void 3112ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 3113{ 3114 struct ixgbe_hw *hw = &adapter->hw; 3115 u32 ivar, index; 3116 3117 vector |= IXGBE_IVAR_ALLOC_VAL; 3118 3119 switch (hw->mac.type) { 3120 3121 case ixgbe_mac_82598EB: 3122 if (type == -1) 3123 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 3124 else 3125 entry += (type * 64); 3126 index = (entry >> 2) & 0x1F; 3127 
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3128 ivar &= ~(0xFF << (8 * (entry & 0x3))); 3129 ivar |= (vector << (8 * (entry & 0x3))); 3130 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 3131 break; 3132 3133 case ixgbe_mac_82599EB: 3134 case ixgbe_mac_X540: 3135 case ixgbe_mac_X550: 3136 case ixgbe_mac_X550EM_x: 3137 case ixgbe_mac_X550EM_a: 3138 if (type == -1) { /* MISC IVAR */ 3139 index = (entry & 1) * 8; 3140 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 3141 ivar &= ~(0xFF << index); 3142 ivar |= (vector << index); 3143 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 3144 } else { /* RX/TX IVARS */ 3145 index = (16 * (entry & 1)) + (8 * type); 3146 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 3147 ivar &= ~(0xFF << index); 3148 ivar |= (vector << index); 3149 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 3150 } 3151 3152 default: 3153 break; 3154 } 3155} /* ixgbe_set_ivar */ 3156 3157/************************************************************************ 3158 * ixgbe_configure_ivars 3159 ************************************************************************/ 3160static void 3161ixgbe_configure_ivars(struct adapter *adapter) 3162{ 3163 struct ix_queue *que = adapter->queues; 3164 u32 newitr; 3165 3166 if (ixgbe_max_interrupt_rate > 0) 3167 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 3168 else { 3169 /* 3170 * Disable DMA coalescing if interrupt moderation is 3171 * disabled. 3172 */ 3173 adapter->dmac = 0; 3174 newitr = 0; 3175 } 3176 3177 for (int i = 0; i < adapter->num_queues; i++, que++) { 3178 struct rx_ring *rxr = &adapter->rx_rings[i]; 3179 struct tx_ring *txr = &adapter->tx_rings[i]; 3180 /* First the RX queue entry */ 3181 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0); 3182 /* ... 
and the TX */ 3183 ixgbe_set_ivar(adapter, txr->me, que->msix, 1); 3184 /* Set an Initial EITR value */ 3185 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr); 3186 } 3187 3188 /* For the Link interrupt */ 3189 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 3190} /* ixgbe_configure_ivars */ 3191 3192/************************************************************************ 3193 * ixgbe_config_gpie 3194 ************************************************************************/ 3195static void 3196ixgbe_config_gpie(struct adapter *adapter) 3197{ 3198 struct ixgbe_hw *hw = &adapter->hw; 3199 u32 gpie; 3200 3201 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3202 3203 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 3204 /* Enable Enhanced MSI-X mode */ 3205 gpie |= IXGBE_GPIE_MSIX_MODE 3206 | IXGBE_GPIE_EIAME 3207 | IXGBE_GPIE_PBA_SUPPORT 3208 | IXGBE_GPIE_OCD; 3209 } 3210 3211 /* Fan Failure Interrupt */ 3212 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 3213 gpie |= IXGBE_SDP1_GPIEN; 3214 3215 /* Thermal Sensor Interrupt */ 3216 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 3217 gpie |= IXGBE_SDP0_GPIEN_X540; 3218 3219 /* Link detection */ 3220 switch (hw->mac.type) { 3221 case ixgbe_mac_82599EB: 3222 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 3223 break; 3224 case ixgbe_mac_X550EM_x: 3225 case ixgbe_mac_X550EM_a: 3226 gpie |= IXGBE_SDP0_GPIEN_X540; 3227 break; 3228 default: 3229 break; 3230 } 3231 3232 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3233 3234 return; 3235} /* ixgbe_config_gpie */ 3236 3237/************************************************************************ 3238 * ixgbe_config_delay_values 3239 * 3240 * Requires adapter->max_frame_size to be set. 
3241 ************************************************************************/ 3242static void 3243ixgbe_config_delay_values(struct adapter *adapter) 3244{ 3245 struct ixgbe_hw *hw = &adapter->hw; 3246 u32 rxpb, frame, size, tmp; 3247 3248 frame = adapter->max_frame_size; 3249 3250 /* Calculate High Water */ 3251 switch (hw->mac.type) { 3252 case ixgbe_mac_X540: 3253 case ixgbe_mac_X550: 3254 case ixgbe_mac_X550EM_x: 3255 case ixgbe_mac_X550EM_a: 3256 tmp = IXGBE_DV_X540(frame, frame); 3257 break; 3258 default: 3259 tmp = IXGBE_DV(frame, frame); 3260 break; 3261 } 3262 size = IXGBE_BT2KB(tmp); 3263 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 3264 hw->fc.high_water[0] = rxpb - size; 3265 3266 /* Now calculate Low Water */ 3267 switch (hw->mac.type) { 3268 case ixgbe_mac_X540: 3269 case ixgbe_mac_X550: 3270 case ixgbe_mac_X550EM_x: 3271 case ixgbe_mac_X550EM_a: 3272 tmp = IXGBE_LOW_DV_X540(frame); 3273 break; 3274 default: 3275 tmp = IXGBE_LOW_DV(frame); 3276 break; 3277 } 3278 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 3279 3280 hw->fc.pause_time = IXGBE_FC_PAUSE; 3281 hw->fc.send_xon = TRUE; 3282} /* ixgbe_config_delay_values */ 3283 3284/************************************************************************ 3285 * ixgbe_set_multi - Multicast Update 3286 * 3287 * Called whenever multicast address list is updated. 
3288 ************************************************************************/ 3289static void 3290ixgbe_set_multi(struct adapter *adapter) 3291{ 3292 struct ifmultiaddr *ifma; 3293 struct ixgbe_mc_addr *mta; 3294 struct ifnet *ifp = adapter->ifp; 3295 u8 *update_ptr; 3296 int mcnt = 0; 3297 u32 fctrl; 3298 3299 IOCTL_DEBUGOUT("ixgbe_set_multi: begin"); 3300 3301 mta = adapter->mta; 3302 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 3303 3304#if __FreeBSD_version < 800000 3305 IF_ADDR_LOCK(ifp); 3306#else 3307 if_maddr_rlock(ifp); 3308#endif 3309 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3310 if (ifma->ifma_addr->sa_family != AF_LINK) 3311 continue; 3312 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 3313 break; 3314 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 3315 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 3316 mta[mcnt].vmdq = adapter->pool; 3317 mcnt++; 3318 } 3319#if __FreeBSD_version < 800000 3320 IF_ADDR_UNLOCK(ifp); 3321#else 3322 if_maddr_runlock(ifp); 3323#endif 3324 3325 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3326 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3327 if (ifp->if_flags & IFF_PROMISC) 3328 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3329 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || 3330 ifp->if_flags & IFF_ALLMULTI) { 3331 fctrl |= IXGBE_FCTRL_MPE; 3332 fctrl &= ~IXGBE_FCTRL_UPE; 3333 } else 3334 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3335 3336 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 3337 3338 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { 3339 update_ptr = (u8 *)mta; 3340 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 3341 ixgbe_mc_array_itr, TRUE); 3342 } 3343 3344 return; 3345} /* ixgbe_set_multi */ 3346 3347/************************************************************************ 3348 * ixgbe_mc_array_itr 3349 * 3350 * An iterator function needed by the multicast shared code. 
 * It feeds the shared code routine the addresses in the
 * array of ixgbe_set_multi() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	/* Advance the caller's cursor to the next table entry */
	*update_ptr = (u8*)(mta + 1);

	/* Return the MAC address of the current entry */
	return (mta->addr);
} /* ixgbe_mc_array_itr */

/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 *
 *   Runs once per second via callout_reset(); core lock is held
 *   (asserted below).  Only resets the adapter when *all* queues
 *   report hung.
 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	int hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Check the TX queues status
	 *   - mark hung queues so we don't schedule on them
	 *   - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Re-arm for the next one-second tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	/* Full reinit; note the callout is NOT re-armed on this path,
	 * ixgbe_init_locked() restarts it. */
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 *   Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	bool result = FALSE;

	/* Only probe NL-PHY parts that currently report no module */
	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		/* One-shot: stop the timer-driven probing either way */
		adapter->sfp_probe = FALSE;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev, "Unsupported SFP+ module detected!");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = TRUE;
	}
out:

	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 *
 *   Identifies and sets up a newly inserted SFP+ module; on success
 *   schedules the MSF (multispeed fiber) task to renegotiate link.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	if (adapter->hw.need_crosstalk_fix) {
		/* Check the SDP cage-presence pin before trusting the irq */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	err = hw->mac.ops.setup_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	/* Module is good: queue the multispeed-fiber follow-up task */
	adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
	taskqueue_enqueue(adapter->tq, &adapter->admin_task);
	return;

handle_mod_out:
	/* Module unusable: make sure no stale MSF request lingers */
	adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */


/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 *
 *   Re-reads link capabilities, re-runs link setup, and rebuilds the
 *   ifmedia list to match the (possibly new) module.
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */

/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 *
 *   Delegates to the PHY's LASI handler and reports over-temperature
 *   or other LASI errors.
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	int error;

	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(adapter->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

/************************************************************************
 * ixgbe_handle_admin_task - Handler for interrupt tasklets meant to be
 *   called in separate task.
 *
 *   Runs under the core lock with queue interrupts still flowing
 *   (keep_traffic=true); dispatches every request flagged in
 *   adapter->task_requests, then clears them and re-enables the
 *   non-queue interrupts.
 ************************************************************************/
static void
ixgbe_handle_admin_task(void *context, int pending)
{
	struct adapter *adapter = context;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_disable_intr(adapter, true);

	if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(adapter);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(adapter);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(adapter);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(adapter);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(adapter);
	if (adapter->task_requests & IXGBE_REQUEST_TASK_LINK)
		ixgbe_handle_link(adapter);
	adapter->task_requests = 0;

	ixgbe_enable_intr(adapter, true);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_admin_task */

/************************************************************************
 * ixgbe_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter, false);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ixgbe_reset_hw(hw);
	/* Clear the flag so the following stop call actually runs */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */

/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	if (adapter->link_up) {
		/* Only act on a down->up transition */
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				/* link_speed value 128 is reported as 10G */
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up->down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}

	return;
} /* ixgbe_update_link_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	/* DMA coalescing needs X550 or newer plus the dmac_config op */
	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	/* Reprogram only when the watchdog timer or link speed changed */
	if (dcfg->watchdog_timer ^ adapter->dmac ||
	    dcfg->link_speed ^ adapter->link_speed) {
		dcfg->watchdog_timer = adapter->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = adapter->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */

/************************************************************************
 * ixgbe_enable_intr
 *   If skip_traffic parameter is set, queues' irqs are not enabled.
 *   This is useful while reenabling interrupts after disabling them
 *   with ixgbe_disable_intr() 'keep_traffic' parameter set to true
 *   as queues' interrupts are already enabled.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter, bool skip_traffic)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Everything except the per-queue RX/TX causes to start with */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-specific causes (ECC, thermal sensor, SDP pins) */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	if (!skip_traffic) {
		/*
		 * Now enable all queues, this is done separately to
		 * allow for handling the extended (beyond 32) MSI-X
		 * vectors that can be used by 82599
		 */
		for (int i = 0; i < adapter->num_queues; i++, que++)
			ixgbe_enable_queue(adapter, que->msix);
	}

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */

/************************************************************************
 * ixgbe_disable_intr
 *   If keep_traffic parameter is set, queue interrupts are not disabled.
 *   This is needed by ixgbe_handle_admin_task() to handle link specific
 *   interrupt procedures without stopping the traffic.
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter, bool keep_traffic)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eiac_mask, eimc_mask, eimc_ext_mask;

	if (keep_traffic) {
		/* Autoclear only queue irqs */
		eiac_mask = IXGBE_EICR_RTX_QUEUE;

		/* Disable everything but queue irqs */
		eimc_mask = ~0;
		eimc_mask &= ~IXGBE_EIMC_RTX_QUEUE;
		eimc_ext_mask = 0;
	} else {
		/* Disable everything; 82598 has no EIMC_EX registers,
		 * so its whole mask lives in EIMC */
		eiac_mask = 0;
		eimc_mask = (hw->mac.type == ixgbe_mac_82598EB) ?
		    ~0 : 0xFFFF0000;
		eimc_ext_mask = ~0;
	}

	if (adapter->msix_mem)
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac_mask);

	IXGBE_WRITE_REG(hw, IXGBE_EIMC, eimc_mask);
	IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), eimc_ext_mask);
	IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), eimc_ext_mask);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_disable_intr */

/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 ************************************************************************/
static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		/* Shared/spurious interrupt: just re-enable and bail */
		ixgbe_enable_intr(adapter, false);
		return;
	}

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		adapter->task_requests |= IXGBE_REQUEST_TASK_LINK;
		taskqueue_enqueue(adapter->tq, &adapter->admin_task);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
else 3893 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 3894 3895 if (eicr & eicr_mask) { 3896 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 3897 adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; 3898 taskqueue_enqueue(adapter->tq, &adapter->admin_task); 3899 } 3900 3901 if ((hw->mac.type == ixgbe_mac_82599EB) && 3902 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 3903 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3904 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3905 adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; 3906 taskqueue_enqueue(adapter->tq, &adapter->admin_task); 3907 } 3908 } 3909 3910 /* External PHY interrupt */ 3911 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3912 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 3913 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; 3914 taskqueue_enqueue(adapter->tq, &adapter->admin_task); 3915 } 3916 3917 if (more) 3918 taskqueue_enqueue(que->tq, &que->que_task); 3919 else 3920 ixgbe_enable_intr(adapter, false); 3921 3922 return; 3923} /* ixgbe_legacy_irq */ 3924 3925/************************************************************************ 3926 * ixgbe_free_pci_resources 3927 ************************************************************************/ 3928static void 3929ixgbe_free_pci_resources(struct adapter *adapter) 3930{ 3931 struct ix_queue *que = adapter->queues; 3932 device_t dev = adapter->dev; 3933 int rid, memrid; 3934 3935 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 3936 memrid = PCIR_BAR(MSIX_82598_BAR); 3937 else 3938 memrid = PCIR_BAR(MSIX_82599_BAR); 3939 3940 /* 3941 * There is a slight possibility of a failure mode 3942 * in attach that will result in entering this function 3943 * before interrupt resources have been initialized, and 3944 * in that case we do not want to execute the loops below 3945 * We can detect this reliably by the state of the adapter 3946 * res pointer. 
3947 */ 3948 if (adapter->res == NULL) 3949 goto mem; 3950 3951 /* 3952 * Release all msix queue resources: 3953 */ 3954 for (int i = 0; i < adapter->num_queues; i++, que++) { 3955 rid = que->msix + 1; 3956 if (que->tag != NULL) { 3957 bus_teardown_intr(dev, que->res, que->tag); 3958 que->tag = NULL; 3959 } 3960 if (que->res != NULL) 3961 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); 3962 } 3963 3964 3965 if (adapter->tag != NULL) { 3966 bus_teardown_intr(dev, adapter->res, adapter->tag); 3967 adapter->tag = NULL; 3968 } 3969 3970 /* Clean the Legacy or Link interrupt last */ 3971 if (adapter->res != NULL) 3972 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid, 3973 adapter->res); 3974 3975mem: 3976 if ((adapter->feat_en & IXGBE_FEATURE_MSI) || 3977 (adapter->feat_en & IXGBE_FEATURE_MSIX)) 3978 pci_release_msi(dev); 3979 3980 if (adapter->msix_mem != NULL) 3981 bus_release_resource(dev, SYS_RES_MEMORY, memrid, 3982 adapter->msix_mem); 3983 3984 if (adapter->pci_mem != NULL) 3985 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 3986 adapter->pci_mem); 3987 3988 return; 3989} /* ixgbe_free_pci_resources */ 3990 3991/************************************************************************ 3992 * ixgbe_set_sysctl_value 3993 ************************************************************************/ 3994static void 3995ixgbe_set_sysctl_value(struct adapter *adapter, const char *name, 3996 const char *description, int *limit, int value) 3997{ 3998 *limit = value; 3999 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 4000 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4001 OID_AUTO, name, CTLFLAG_RW, limit, value, description); 4002} /* ixgbe_set_sysctl_value */ 4003 4004/************************************************************************ 4005 * ixgbe_sysctl_flowcntl 4006 * 4007 * SYSCTL wrapper around setting Flow Control 4008 ************************************************************************/ 4009static int 
4010ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS) 4011{ 4012 struct adapter *adapter; 4013 int error, fc; 4014 4015 adapter = (struct adapter *)arg1; 4016 fc = adapter->hw.fc.current_mode; 4017 4018 error = sysctl_handle_int(oidp, &fc, 0, req); 4019 if ((error) || (req->newptr == NULL)) 4020 return (error); 4021 4022 /* Don't bother if it's not changed */ 4023 if (fc == adapter->hw.fc.current_mode) 4024 return (0); 4025 4026 return ixgbe_set_flowcntl(adapter, fc); 4027} /* ixgbe_sysctl_flowcntl */ 4028 4029/************************************************************************ 4030 * ixgbe_set_flowcntl - Set flow control 4031 * 4032 * Flow control values: 4033 * 0 - off 4034 * 1 - rx pause 4035 * 2 - tx pause 4036 * 3 - full 4037 ************************************************************************/ 4038static int 4039ixgbe_set_flowcntl(struct adapter *adapter, int fc) 4040{ 4041 switch (fc) { 4042 case ixgbe_fc_rx_pause: 4043 case ixgbe_fc_tx_pause: 4044 case ixgbe_fc_full: 4045 adapter->hw.fc.requested_mode = fc; 4046 if (adapter->num_queues > 1) 4047 ixgbe_disable_rx_drop(adapter); 4048 break; 4049 case ixgbe_fc_none: 4050 adapter->hw.fc.requested_mode = ixgbe_fc_none; 4051 if (adapter->num_queues > 1) 4052 ixgbe_enable_rx_drop(adapter); 4053 break; 4054 default: 4055 return (EINVAL); 4056 } 4057 4058 /* Don't autoneg if forcing a value */ 4059 adapter->hw.fc.disable_fc_autoneg = TRUE; 4060 ixgbe_fc_enable(&adapter->hw); 4061 4062 return (0); 4063} /* ixgbe_set_flowcntl */ 4064 4065/************************************************************************ 4066 * ixgbe_enable_rx_drop 4067 * 4068 * Enable the hardware to drop packets when the buffer is 4069 * full. This is useful with multiqueue, so that no single 4070 * queue being full stalls the entire RX engine. We only 4071 * enable this when Multiqueue is enabled AND Flow Control 4072 * is disabled. 
4073 ************************************************************************/ 4074static void 4075ixgbe_enable_rx_drop(struct adapter *adapter) 4076{ 4077 struct ixgbe_hw *hw = &adapter->hw; 4078 struct rx_ring *rxr; 4079 u32 srrctl; 4080 4081 for (int i = 0; i < adapter->num_queues; i++) { 4082 rxr = &adapter->rx_rings[i]; 4083 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 4084 srrctl |= IXGBE_SRRCTL_DROP_EN; 4085 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 4086 } 4087 4088 /* enable drop for each vf */ 4089 for (int i = 0; i < adapter->num_vfs; i++) { 4090 IXGBE_WRITE_REG(hw, IXGBE_QDE, 4091 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 4092 IXGBE_QDE_ENABLE)); 4093 } 4094} /* ixgbe_enable_rx_drop */ 4095 4096/************************************************************************ 4097 * ixgbe_disable_rx_drop 4098 ************************************************************************/ 4099static void 4100ixgbe_disable_rx_drop(struct adapter *adapter) 4101{ 4102 struct ixgbe_hw *hw = &adapter->hw; 4103 struct rx_ring *rxr; 4104 u32 srrctl; 4105 4106 for (int i = 0; i < adapter->num_queues; i++) { 4107 rxr = &adapter->rx_rings[i]; 4108 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 4109 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 4110 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 4111 } 4112 4113 /* disable drop for each vf */ 4114 for (int i = 0; i < adapter->num_vfs; i++) { 4115 IXGBE_WRITE_REG(hw, IXGBE_QDE, 4116 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 4117 } 4118} /* ixgbe_disable_rx_drop */ 4119 4120/************************************************************************ 4121 * ixgbe_sysctl_advertise 4122 * 4123 * SYSCTL wrapper around setting advertised speed 4124 ************************************************************************/ 4125static int 4126ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS) 4127{ 4128 struct adapter *adapter; 4129 int error, advertise; 4130 4131 adapter = (struct adapter *)arg1; 4132 advertise = 
adapter->advertise; 4133 4134 error = sysctl_handle_int(oidp, &advertise, 0, req); 4135 if ((error) || (req->newptr == NULL)) 4136 return (error); 4137 4138 return ixgbe_set_advertise(adapter, advertise); 4139} /* ixgbe_sysctl_advertise */ 4140 4141/************************************************************************ 4142 * ixgbe_set_advertise - Control advertised link speed 4143 * 4144 * Flags: 4145 * 0x1 - advertise 100 Mb 4146 * 0x2 - advertise 1G 4147 * 0x4 - advertise 10G 4148 * 0x8 - advertise 10 Mb (yes, Mb) 4149 ************************************************************************/ 4150static int 4151ixgbe_set_advertise(struct adapter *adapter, int advertise) 4152{ 4153 device_t dev; 4154 struct ixgbe_hw *hw; 4155 ixgbe_link_speed speed = 0; 4156 ixgbe_link_speed link_caps = 0; 4157 s32 err = IXGBE_NOT_IMPLEMENTED; 4158 bool negotiate = FALSE; 4159 4160 /* Checks to validate new value */ 4161 if (adapter->advertise == advertise) /* no change */ 4162 return (0); 4163 4164 dev = adapter->dev; 4165 hw = &adapter->hw; 4166 4167 /* No speed changes for backplane media */ 4168 if (hw->phy.media_type == ixgbe_media_type_backplane) 4169 return (ENODEV); 4170 4171 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 4172 (hw->phy.multispeed_fiber))) { 4173 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n"); 4174 return (EINVAL); 4175 } 4176 4177 if (advertise < 0x1 || advertise > 0xF) { 4178 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n"); 4179 return (EINVAL); 4180 } 4181 4182 if (hw->mac.ops.get_link_capabilities) { 4183 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 4184 &negotiate); 4185 if (err != IXGBE_SUCCESS) { 4186 device_printf(dev, "Unable to determine supported advertise speeds\n"); 4187 return (ENODEV); 4188 } 4189 } 4190 4191 /* Set new value and report new advertised mode */ 4192 if (advertise & 0x1) { 4193 if (!(link_caps & 
IXGBE_LINK_SPEED_100_FULL)) { 4194 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 4195 return (EINVAL); 4196 } 4197 speed |= IXGBE_LINK_SPEED_100_FULL; 4198 } 4199 if (advertise & 0x2) { 4200 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 4201 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 4202 return (EINVAL); 4203 } 4204 speed |= IXGBE_LINK_SPEED_1GB_FULL; 4205 } 4206 if (advertise & 0x4) { 4207 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 4208 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 4209 return (EINVAL); 4210 } 4211 speed |= IXGBE_LINK_SPEED_10GB_FULL; 4212 } 4213 if (advertise & 0x8) { 4214 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 4215 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 4216 return (EINVAL); 4217 } 4218 speed |= IXGBE_LINK_SPEED_10_FULL; 4219 } 4220 4221 hw->mac.autotry_restart = TRUE; 4222 hw->mac.ops.setup_link(hw, speed, TRUE); 4223 adapter->advertise = advertise; 4224 4225 return (0); 4226} /* ixgbe_set_advertise */ 4227 4228/************************************************************************ 4229 * ixgbe_get_advertise - Get current advertised speed settings 4230 * 4231 * Formatted for sysctl usage. 
4232 * Flags: 4233 * 0x1 - advertise 100 Mb 4234 * 0x2 - advertise 1G 4235 * 0x4 - advertise 10G 4236 * 0x8 - advertise 10 Mb (yes, Mb) 4237 ************************************************************************/ 4238static int 4239ixgbe_get_advertise(struct adapter *adapter) 4240{ 4241 struct ixgbe_hw *hw = &adapter->hw; 4242 int speed; 4243 ixgbe_link_speed link_caps = 0; 4244 s32 err; 4245 bool negotiate = FALSE; 4246 4247 /* 4248 * Advertised speed means nothing unless it's copper or 4249 * multi-speed fiber 4250 */ 4251 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 4252 !(hw->phy.multispeed_fiber)) 4253 return (0); 4254 4255 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 4256 if (err != IXGBE_SUCCESS) 4257 return (0); 4258 4259 speed = 4260 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | 4261 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | 4262 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | 4263 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); 4264 4265 return speed; 4266} /* ixgbe_get_advertise */ 4267 4268/************************************************************************ 4269 * ixgbe_sysctl_dmac - Manage DMA Coalescing 4270 * 4271 * Control values: 4272 * 0/1 - off / on (use default value of 1000) 4273 * 4274 * Legal timer values are: 4275 * 50,100,250,500,1000,2000,5000,10000 4276 * 4277 * Turning off interrupt moderation will also turn this off. 
4278 ************************************************************************/ 4279static int 4280ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) 4281{ 4282 struct adapter *adapter = (struct adapter *)arg1; 4283 struct ifnet *ifp = adapter->ifp; 4284 int error; 4285 u32 newval; 4286 4287 newval = adapter->dmac; 4288 error = sysctl_handle_int(oidp, &newval, 0, req); 4289 if ((error) || (req->newptr == NULL)) 4290 return (error); 4291 4292 switch (newval) { 4293 case 0: 4294 /* Disabled */ 4295 adapter->dmac = 0; 4296 break; 4297 case 1: 4298 /* Enable and use default */ 4299 adapter->dmac = 1000; 4300 break; 4301 case 50: 4302 case 100: 4303 case 250: 4304 case 500: 4305 case 1000: 4306 case 2000: 4307 case 5000: 4308 case 10000: 4309 /* Legal values - allow */ 4310 adapter->dmac = newval; 4311 break; 4312 default: 4313 /* Do nothing, illegal value */ 4314 return (EINVAL); 4315 } 4316 4317 /* Re-initialize hardware if it's already running */ 4318 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4319 ixgbe_init(adapter); 4320 4321 return (0); 4322} /* ixgbe_sysctl_dmac */ 4323 4324#ifdef IXGBE_DEBUG 4325/************************************************************************ 4326 * ixgbe_sysctl_power_state 4327 * 4328 * Sysctl to test power states 4329 * Values: 4330 * 0 - set device to D0 4331 * 3 - set device to D3 4332 * (none) - get current device power state 4333 ************************************************************************/ 4334static int 4335ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) 4336{ 4337 struct adapter *adapter = (struct adapter *)arg1; 4338 device_t dev = adapter->dev; 4339 int curr_ps, new_ps, error = 0; 4340 4341 curr_ps = new_ps = pci_get_powerstate(dev); 4342 4343 error = sysctl_handle_int(oidp, &new_ps, 0, req); 4344 if ((error) || (req->newptr == NULL)) 4345 return (error); 4346 4347 if (new_ps == curr_ps) 4348 return (0); 4349 4350 if (new_ps == 3 && curr_ps == 0) 4351 error = DEVICE_SUSPEND(dev); 4352 else if (new_ps == 0 && curr_ps == 3) 4353 
error = DEVICE_RESUME(dev); 4354 else 4355 return (EINVAL); 4356 4357 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 4358 4359 return (error); 4360} /* ixgbe_sysctl_power_state */ 4361#endif 4362 4363/************************************************************************ 4364 * ixgbe_sysctl_wol_enable 4365 * 4366 * Sysctl to enable/disable the WoL capability, 4367 * if supported by the adapter. 4368 * 4369 * Values: 4370 * 0 - disabled 4371 * 1 - enabled 4372 ************************************************************************/ 4373static int 4374ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) 4375{ 4376 struct adapter *adapter = (struct adapter *)arg1; 4377 struct ixgbe_hw *hw = &adapter->hw; 4378 int new_wol_enabled; 4379 int error = 0; 4380 4381 new_wol_enabled = hw->wol_enabled; 4382 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); 4383 if ((error) || (req->newptr == NULL)) 4384 return (error); 4385 new_wol_enabled = !!(new_wol_enabled); 4386 if (new_wol_enabled == hw->wol_enabled) 4387 return (0); 4388 4389 if (new_wol_enabled > 0 && !adapter->wol_support) 4390 return (ENODEV); 4391 else 4392 hw->wol_enabled = new_wol_enabled; 4393 4394 return (0); 4395} /* ixgbe_sysctl_wol_enable */ 4396 4397/************************************************************************ 4398 * ixgbe_sysctl_wufc - Wake Up Filter Control 4399 * 4400 * Sysctl to enable/disable the types of packets that the 4401 * adapter will wake up on upon receipt. 4402 * Flags: 4403 * 0x1 - Link Status Change 4404 * 0x2 - Magic Packet 4405 * 0x4 - Direct Exact 4406 * 0x8 - Directed Multicast 4407 * 0x10 - Broadcast 4408 * 0x20 - ARP/IPv4 Request Packet 4409 * 0x40 - Direct IPv4 Packet 4410 * 0x80 - Direct IPv6 Packet 4411 * 4412 * Settings not listed above will cause the sysctl to return an error. 
4413 ************************************************************************/ 4414static int 4415ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) 4416{ 4417 struct adapter *adapter = (struct adapter *)arg1; 4418 int error = 0; 4419 u32 new_wufc; 4420 4421 new_wufc = adapter->wufc; 4422 4423 error = sysctl_handle_int(oidp, &new_wufc, 0, req); 4424 if ((error) || (req->newptr == NULL)) 4425 return (error); 4426 if (new_wufc == adapter->wufc) 4427 return (0); 4428 4429 if (new_wufc & 0xffffff00) 4430 return (EINVAL); 4431 4432 new_wufc &= 0xff; 4433 new_wufc |= (0xffffff & adapter->wufc); 4434 adapter->wufc = new_wufc; 4435 4436 return (0); 4437} /* ixgbe_sysctl_wufc */ 4438 4439#ifdef IXGBE_DEBUG 4440/************************************************************************ 4441 * ixgbe_sysctl_print_rss_config 4442 ************************************************************************/ 4443static int 4444ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) 4445{ 4446 struct adapter *adapter = (struct adapter *)arg1; 4447 struct ixgbe_hw *hw = &adapter->hw; 4448 device_t dev = adapter->dev; 4449 struct sbuf *buf; 4450 int error = 0, reta_size; 4451 u32 reg; 4452 4453 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4454 if (!buf) { 4455 device_printf(dev, "Could not allocate sbuf for output.\n"); 4456 return (ENOMEM); 4457 } 4458 4459 // TODO: use sbufs to make a string to print out 4460 /* Set multiplier for RETA setup and table size based on MAC */ 4461 switch (adapter->hw.mac.type) { 4462 case ixgbe_mac_X550: 4463 case ixgbe_mac_X550EM_x: 4464 case ixgbe_mac_X550EM_a: 4465 reta_size = 128; 4466 break; 4467 default: 4468 reta_size = 32; 4469 break; 4470 } 4471 4472 /* Print out the redirection table */ 4473 sbuf_cat(buf, "\n"); 4474 for (int i = 0; i < reta_size; i++) { 4475 if (i < 32) { 4476 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 4477 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 4478 } else { 4479 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 4480 sbuf_printf(buf, 
"ERETA(%2d): 0x%08x\n", i - 32, reg); 4481 } 4482 } 4483 4484 // TODO: print more config 4485 4486 error = sbuf_finish(buf); 4487 if (error) 4488 device_printf(dev, "Error finishing sbuf: %d\n", error); 4489 4490 sbuf_delete(buf); 4491 4492 return (0); 4493} /* ixgbe_sysctl_print_rss_config */ 4494#endif /* IXGBE_DEBUG */ 4495 4496/************************************************************************ 4497 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 4498 * 4499 * For X552/X557-AT devices using an external PHY 4500 ************************************************************************/ 4501static int 4502ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) 4503{ 4504 struct adapter *adapter = (struct adapter *)arg1; 4505 struct ixgbe_hw *hw = &adapter->hw; 4506 u16 reg; 4507 4508 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4509 device_printf(adapter->dev, 4510 "Device has no supported external thermal sensor.\n"); 4511 return (ENODEV); 4512 } 4513 4514 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 4515 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4516 device_printf(adapter->dev, 4517 "Error reading from PHY's current temperature register\n"); 4518 return (EAGAIN); 4519 } 4520 4521 /* Shift temp for output */ 4522 reg = reg >> 8; 4523 4524 return (sysctl_handle_int(oidp, NULL, reg, req)); 4525} /* ixgbe_sysctl_phy_temp */ 4526 4527/************************************************************************ 4528 * ixgbe_sysctl_phy_overtemp_occurred 4529 * 4530 * Reports (directly from the PHY) whether the current PHY 4531 * temperature is over the overtemp threshold. 
4532 ************************************************************************/ 4533static int 4534ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) 4535{ 4536 struct adapter *adapter = (struct adapter *)arg1; 4537 struct ixgbe_hw *hw = &adapter->hw; 4538 u16 reg; 4539 4540 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 4541 device_printf(adapter->dev, 4542 "Device has no supported external thermal sensor.\n"); 4543 return (ENODEV); 4544 } 4545 4546 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 4547 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 4548 device_printf(adapter->dev, 4549 "Error reading from PHY's temperature status register\n"); 4550 return (EAGAIN); 4551 } 4552 4553 /* Get occurrence bit */ 4554 reg = !!(reg & 0x4000); 4555 4556 return (sysctl_handle_int(oidp, 0, reg, req)); 4557} /* ixgbe_sysctl_phy_overtemp_occurred */ 4558 4559/************************************************************************ 4560 * ixgbe_sysctl_eee_state 4561 * 4562 * Sysctl to set EEE power saving feature 4563 * Values: 4564 * 0 - disable EEE 4565 * 1 - enable EEE 4566 * (none) - get current device EEE state 4567 ************************************************************************/ 4568static int 4569ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS) 4570{ 4571 struct adapter *adapter = (struct adapter *)arg1; 4572 device_t dev = adapter->dev; 4573 int curr_eee, new_eee, error = 0; 4574 s32 retval; 4575 4576 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE); 4577 4578 error = sysctl_handle_int(oidp, &new_eee, 0, req); 4579 if ((error) || (req->newptr == NULL)) 4580 return (error); 4581 4582 /* Nothing to do */ 4583 if (new_eee == curr_eee) 4584 return (0); 4585 4586 /* Not supported */ 4587 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE)) 4588 return (EINVAL); 4589 4590 /* Bounds checking */ 4591 if ((new_eee < 0) || (new_eee > 1)) 4592 return (EINVAL); 4593 4594 retval = ixgbe_setup_eee(&adapter->hw, new_eee); 4595 if (retval) { 4596 
device_printf(dev, "Error in EEE setup: 0x%08X\n", retval); 4597 return (EINVAL); 4598 } 4599 4600 /* Restart auto-neg */ 4601 ixgbe_init(adapter); 4602 4603 device_printf(dev, "New EEE state: %d\n", new_eee); 4604 4605 /* Cache new value */ 4606 if (new_eee) 4607 adapter->feat_en |= IXGBE_FEATURE_EEE; 4608 else 4609 adapter->feat_en &= ~IXGBE_FEATURE_EEE; 4610 4611 return (error); 4612} /* ixgbe_sysctl_eee_state */ 4613 4614/************************************************************************ 4615 * ixgbe_init_device_features 4616 ************************************************************************/ 4617static void 4618ixgbe_init_device_features(struct adapter *adapter) 4619{ 4620 adapter->feat_cap = IXGBE_FEATURE_NETMAP 4621 | IXGBE_FEATURE_RSS 4622 | IXGBE_FEATURE_MSI 4623 | IXGBE_FEATURE_MSIX 4624 | IXGBE_FEATURE_LEGACY_IRQ 4625 | IXGBE_FEATURE_LEGACY_TX; 4626 4627 /* Set capabilities first... */ 4628 switch (adapter->hw.mac.type) { 4629 case ixgbe_mac_82598EB: 4630 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT) 4631 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 4632 break; 4633 case ixgbe_mac_X540: 4634 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4635 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4636 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 4637 (adapter->hw.bus.func == 0)) 4638 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4639 break; 4640 case ixgbe_mac_X550: 4641 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4642 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4643 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4644 break; 4645 case ixgbe_mac_X550EM_x: 4646 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4647 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4648 break; 4649 case ixgbe_mac_X550EM_a: 4650 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4651 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4652 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4653 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 4654 (adapter->hw.device_id == 
IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 4655 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 4656 adapter->feat_cap |= IXGBE_FEATURE_EEE; 4657 } 4658 break; 4659 case ixgbe_mac_82599EB: 4660 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 4661 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 4662 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 4663 (adapter->hw.bus.func == 0)) 4664 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 4665 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 4666 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 4667 break; 4668 default: 4669 break; 4670 } 4671 4672 /* Enabled by default... */ 4673 /* Fan failure detection */ 4674 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL) 4675 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL; 4676 /* Netmap */ 4677 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 4678 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 4679 /* EEE */ 4680 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 4681 adapter->feat_en |= IXGBE_FEATURE_EEE; 4682 /* Thermal Sensor */ 4683 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 4684 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 4685 4686 /* Enabled via global sysctl... */ 4687 /* Flow Director */ 4688 if (ixgbe_enable_fdir) { 4689 if (adapter->feat_cap & IXGBE_FEATURE_FDIR) 4690 adapter->feat_en |= IXGBE_FEATURE_FDIR; 4691 else 4692 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled."); 4693 } 4694 /* Legacy (single queue) transmit */ 4695 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 4696 ixgbe_enable_legacy_tx) 4697 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; 4698 /* 4699 * Message Signal Interrupts - Extended (MSI-X) 4700 * Normal MSI is only enabled if MSI-X calls fail. 
4701 */ 4702 if (!ixgbe_enable_msix) 4703 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX; 4704 /* Receive-Side Scaling (RSS) */ 4705 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 4706 adapter->feat_en |= IXGBE_FEATURE_RSS; 4707 4708 /* Disable features with unmet dependencies... */ 4709 /* No MSI-X */ 4710 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) { 4711 adapter->feat_cap &= ~IXGBE_FEATURE_RSS; 4712 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV; 4713 adapter->feat_en &= ~IXGBE_FEATURE_RSS; 4714 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; 4715 } 4716} /* ixgbe_init_device_features */ 4717 4718/************************************************************************ 4719 * ixgbe_probe - Device identification routine 4720 * 4721 * Determines if the driver should be loaded on 4722 * adapter based on its PCI vendor/device ID. 4723 * 4724 * return BUS_PROBE_DEFAULT on success, positive on failure 4725 ************************************************************************/ 4726static int 4727ixgbe_probe(device_t dev) 4728{ 4729 ixgbe_vendor_info_t *ent; 4730 4731 u16 pci_vendor_id = 0; 4732 u16 pci_device_id = 0; 4733 u16 pci_subvendor_id = 0; 4734 u16 pci_subdevice_id = 0; 4735 char adapter_name[256]; 4736 4737 INIT_DEBUGOUT("ixgbe_probe: begin"); 4738 4739 pci_vendor_id = pci_get_vendor(dev); 4740 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID) 4741 return (ENXIO); 4742 4743 pci_device_id = pci_get_device(dev); 4744 pci_subvendor_id = pci_get_subvendor(dev); 4745 pci_subdevice_id = pci_get_subdevice(dev); 4746 4747 ent = ixgbe_vendor_info_array; 4748 while (ent->vendor_id != 0) { 4749 if ((pci_vendor_id == ent->vendor_id) && 4750 (pci_device_id == ent->device_id) && 4751 ((pci_subvendor_id == ent->subvendor_id) || 4752 (ent->subvendor_id == 0)) && 4753 ((pci_subdevice_id == ent->subdevice_id) || 4754 (ent->subdevice_id == 0))) { 4755 sprintf(adapter_name, "%s, Version - %s", 4756 ixgbe_strings[ent->index], 4757 ixgbe_driver_version); 4758 
device_set_desc_copy(dev, adapter_name); 4759 ++ixgbe_total_ports; 4760 return (BUS_PROBE_DEFAULT); 4761 } 4762 ent++; 4763 } 4764 4765 return (ENXIO); 4766} /* ixgbe_probe */ 4767 4768 4769/************************************************************************ 4770 * ixgbe_ioctl - Ioctl entry point 4771 * 4772 * Called when the user wants to configure the interface. 4773 * 4774 * return 0 on success, positive on failure 4775 ************************************************************************/ 4776static int 4777ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 4778{ 4779 struct adapter *adapter = ifp->if_softc; 4780 struct ifreq *ifr = (struct ifreq *) data; 4781#if defined(INET) || defined(INET6) 4782 struct ifaddr *ifa = (struct ifaddr *)data; 4783#endif 4784 int error = 0; 4785 bool avoid_reset = FALSE; 4786 4787 switch (command) { 4788 case SIOCSIFADDR: 4789#ifdef INET 4790 if (ifa->ifa_addr->sa_family == AF_INET) 4791 avoid_reset = TRUE; 4792#endif 4793#ifdef INET6 4794 if (ifa->ifa_addr->sa_family == AF_INET6) 4795 avoid_reset = TRUE; 4796#endif 4797 /* 4798 * Calling init results in link renegotiation, 4799 * so we avoid doing it when possible. 
4800 */ 4801 if (avoid_reset) { 4802 ifp->if_flags |= IFF_UP; 4803 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 4804 ixgbe_init(adapter); 4805#ifdef INET 4806 if (!(ifp->if_flags & IFF_NOARP)) 4807 arp_ifinit(ifp, ifa); 4808#endif 4809 } else 4810 error = ether_ioctl(ifp, command, data); 4811 break; 4812 case SIOCSIFMTU: 4813 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 4814 if (ifr->ifr_mtu > IXGBE_MAX_MTU) { 4815 error = EINVAL; 4816 } else { 4817 IXGBE_CORE_LOCK(adapter); 4818 ifp->if_mtu = ifr->ifr_mtu; 4819 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; 4820 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4821 ixgbe_init_locked(adapter); 4822 ixgbe_recalculate_max_frame(adapter); 4823 IXGBE_CORE_UNLOCK(adapter); 4824 } 4825 break; 4826 case SIOCSIFFLAGS: 4827 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 4828 IXGBE_CORE_LOCK(adapter); 4829 if (ifp->if_flags & IFF_UP) { 4830 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4831 if ((ifp->if_flags ^ adapter->if_flags) & 4832 (IFF_PROMISC | IFF_ALLMULTI)) { 4833 ixgbe_set_promisc(adapter); 4834 } 4835 } else 4836 ixgbe_init_locked(adapter); 4837 } else 4838 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4839 ixgbe_stop(adapter); 4840 adapter->if_flags = ifp->if_flags; 4841 IXGBE_CORE_UNLOCK(adapter); 4842 break; 4843 case SIOCADDMULTI: 4844 case SIOCDELMULTI: 4845 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 4846 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4847 IXGBE_CORE_LOCK(adapter); 4848 ixgbe_disable_intr(adapter, false); 4849 ixgbe_set_multi(adapter); 4850 ixgbe_enable_intr(adapter, false); 4851 IXGBE_CORE_UNLOCK(adapter); 4852 } 4853 break; 4854 case SIOCSIFMEDIA: 4855 case SIOCGIFMEDIA: 4856 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 4857 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 4858 break; 4859 case SIOCSIFCAP: 4860 { 4861 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 4862 4863 int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 4864 4865 if 
(!mask) 4866 break; 4867 4868 /* HW cannot turn these on/off separately */ 4869 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { 4870 ifp->if_capenable ^= IFCAP_RXCSUM; 4871 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 4872 } 4873 if (mask & IFCAP_TXCSUM) 4874 ifp->if_capenable ^= IFCAP_TXCSUM; 4875 if (mask & IFCAP_TXCSUM_IPV6) 4876 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 4877 if (mask & IFCAP_TSO4) 4878 ifp->if_capenable ^= IFCAP_TSO4; 4879 if (mask & IFCAP_TSO6) 4880 ifp->if_capenable ^= IFCAP_TSO6; 4881 if (mask & IFCAP_LRO) 4882 ifp->if_capenable ^= IFCAP_LRO; 4883 if (mask & IFCAP_VLAN_HWTAGGING) 4884 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 4885 if (mask & IFCAP_VLAN_HWFILTER) 4886 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 4887 if (mask & IFCAP_VLAN_HWTSO) 4888 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 4889 4890 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4891 IXGBE_CORE_LOCK(adapter); 4892 ixgbe_init_locked(adapter); 4893 IXGBE_CORE_UNLOCK(adapter); 4894 } 4895 VLAN_CAPABILITIES(ifp); 4896 break; 4897 } 4898#if __FreeBSD_version >= 1100036 4899 case SIOCGI2C: 4900 { 4901 struct ixgbe_hw *hw = &adapter->hw; 4902 struct ifi2creq i2c; 4903 int i; 4904 4905 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); 4906 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 4907 if (error != 0) 4908 break; 4909 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 4910 error = EINVAL; 4911 break; 4912 } 4913 if (i2c.len > sizeof(i2c.data)) { 4914 error = EINVAL; 4915 break; 4916 } 4917 4918 for (i = 0; i < i2c.len; i++) 4919 hw->phy.ops.read_i2c_byte(hw, i2c.offset + i, 4920 i2c.dev_addr, &i2c.data[i]); 4921 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 4922 break; 4923 } 4924#endif 4925 default: 4926 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); 4927 error = ether_ioctl(ifp, command, data); 4928 break; 4929 } 4930 4931 return (error); 4932} /* ixgbe_ioctl */ 4933 4934/************************************************************************ 4935 * 
ixgbe_check_fan_failure 4936 ************************************************************************/ 4937static void 4938ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt) 4939{ 4940 u32 mask; 4941 4942 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) : 4943 IXGBE_ESDP_SDP1; 4944 4945 if (reg & mask) 4946 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 4947} /* ixgbe_check_fan_failure */ 4948 4949/************************************************************************ 4950 * ixgbe_handle_que 4951 ************************************************************************/ 4952static void 4953ixgbe_handle_que(void *context, int pending) 4954{ 4955 struct ix_queue *que = context; 4956 struct adapter *adapter = que->adapter; 4957 struct tx_ring *txr = que->txr; 4958 struct ifnet *ifp = adapter->ifp; 4959 4960 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4961 ixgbe_rxeof(que); 4962 IXGBE_TX_LOCK(txr); 4963 ixgbe_txeof(txr); 4964 if (!ixgbe_ring_empty(ifp, txr->br)) 4965 ixgbe_start_locked(ifp, txr); 4966 IXGBE_TX_UNLOCK(txr); 4967 } 4968 4969 /* Re-enable this interrupt */ 4970 if (que->res != NULL) 4971 ixgbe_enable_queue(adapter, que->msix); 4972 else 4973 ixgbe_enable_intr(adapter, false); 4974 4975 return; 4976} /* ixgbe_handle_que */ 4977 4978 4979 4980/************************************************************************ 4981 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler 4982 ************************************************************************/ 4983static int 4984ixgbe_allocate_legacy(struct adapter *adapter) 4985{ 4986 device_t dev = adapter->dev; 4987 struct ix_queue *que = adapter->queues; 4988 struct tx_ring *txr = adapter->tx_rings; 4989 int error; 4990 4991 /* We allocate a single interrupt resource */ 4992 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 4993 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE); 4994 if (adapter->res == NULL) { 4995 
device_printf(dev, 4996 "Unable to allocate bus resource: interrupt\n"); 4997 return (ENXIO); 4998 } 4999 5000 /* 5001 * Try allocating a fast interrupt and the associated deferred 5002 * processing contexts. 5003 */ 5004 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 5005 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); 5006 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 5007 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 5008 taskqueue_thread_enqueue, &que->tq); 5009 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq", 5010 device_get_nameunit(adapter->dev)); 5011 5012 if ((error = bus_setup_intr(dev, adapter->res, 5013 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que, 5014 &adapter->tag)) != 0) { 5015 device_printf(dev, 5016 "Failed to register fast interrupt handler: %d\n", error); 5017 taskqueue_free(que->tq); 5018 que->tq = NULL; 5019 5020 return (error); 5021 } 5022 /* For simplicity in the handlers */ 5023 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK; 5024 5025 return (0); 5026} /* ixgbe_allocate_legacy */ 5027 5028 5029/************************************************************************ 5030 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers 5031 ************************************************************************/ 5032static int 5033ixgbe_allocate_msix(struct adapter *adapter) 5034{ 5035 device_t dev = adapter->dev; 5036 struct ix_queue *que = adapter->queues; 5037 struct tx_ring *txr = adapter->tx_rings; 5038 int error, rid, vector = 0; 5039 int cpu_id = 0; 5040 unsigned int rss_buckets = 0; 5041 cpuset_t cpu_mask; 5042 5043 /* 5044 * If we're doing RSS, the number of queues needs to 5045 * match the number of RSS buckets that are configured. 5046 * 5047 * + If there's more queues than RSS buckets, we'll end 5048 * up with queues that get no traffic. 
5049 * 5050 * + If there's more RSS buckets than queues, we'll end 5051 * up having multiple RSS buckets map to the same queue, 5052 * so there'll be some contention. 5053 */ 5054 rss_buckets = rss_getnumbuckets(); 5055 if ((adapter->feat_en & IXGBE_FEATURE_RSS) && 5056 (adapter->num_queues != rss_buckets)) { 5057 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n", 5058 __func__, adapter->num_queues, rss_buckets); 5059 } 5060 5061 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 5062 rid = vector + 1; 5063 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 5064 RF_SHAREABLE | RF_ACTIVE); 5065 if (que->res == NULL) { 5066 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n", 5067 vector); 5068 return (ENXIO); 5069 } 5070 /* Set the handler function */ 5071 error = bus_setup_intr(dev, que->res, 5072 INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que, 5073 &que->tag); 5074 if (error) { 5075 que->res = NULL; 5076 device_printf(dev, "Failed to register QUE handler"); 5077 return (error); 5078 } 5079#if __FreeBSD_version >= 800504 5080 bus_describe_intr(dev, que->res, que->tag, "q%d", i); 5081#endif 5082 que->msix = vector; 5083 adapter->active_queues |= (u64)(1 << que->msix); 5084 5085 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 5086 /* 5087 * The queue ID is used as the RSS layer bucket ID. 5088 * We look up the queue ID -> RSS CPU ID and select 5089 * that. 5090 */ 5091 cpu_id = rss_getcpu(i % rss_buckets); 5092 CPU_SETOF(cpu_id, &cpu_mask); 5093 } else { 5094 /* 5095 * Bind the MSI-X vector, and thus the 5096 * rings to the corresponding CPU. 5097 * 5098 * This just happens to match the default RSS 5099 * round-robin bucket -> queue -> CPU allocation. 
5100 */ 5101 if (adapter->num_queues > 1) 5102 cpu_id = i; 5103 } 5104 if (adapter->num_queues > 1) 5105 bus_bind_intr(dev, que->res, cpu_id); 5106#ifdef IXGBE_DEBUG 5107 if (adapter->feat_en & IXGBE_FEATURE_RSS) 5108 device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i, 5109 cpu_id); 5110 else 5111 device_printf(dev, "Bound queue %d to cpu %d\n", i, 5112 cpu_id); 5113#endif /* IXGBE_DEBUG */ 5114 5115 5116 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 5117 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, 5118 txr); 5119 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); 5120 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, 5121 taskqueue_thread_enqueue, &que->tq); 5122#if __FreeBSD_version < 1100000 5123 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d", 5124 device_get_nameunit(adapter->dev), i); 5125#else 5126 if (adapter->feat_en & IXGBE_FEATURE_RSS) 5127 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, 5128 &cpu_mask, "%s (bucket %d)", 5129 device_get_nameunit(adapter->dev), cpu_id); 5130 else 5131 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, 5132 NULL, "%s:q%d", device_get_nameunit(adapter->dev), 5133 i); 5134#endif 5135 } 5136 5137 /* and Link */ 5138 adapter->link_rid = vector + 1; 5139 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 5140 &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE); 5141 if (!adapter->res) { 5142 device_printf(dev, 5143 "Unable to allocate bus resource: Link interrupt [%d]\n", 5144 adapter->link_rid); 5145 return (ENXIO); 5146 } 5147 /* Set the link handler function */ 5148 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, 5149 NULL, ixgbe_msix_link, adapter, &adapter->tag); 5150 if (error) { 5151 adapter->res = NULL; 5152 device_printf(dev, "Failed to register LINK handler"); 5153 return (error); 5154 } 5155#if __FreeBSD_version >= 800504 5156 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); 5157#endif 5158 adapter->vector = vector; 5159 return (0); 5160} /* 
ixgbe_allocate_msix */

/************************************************************************
 * ixgbe_configure_interrupts
 *
 *   Setup MSI-X, MSI, or legacy interrupts (in that order).
 *   This will also depend on user settings.
 *
 *   Side effects: sets adapter->num_queues, adapter->link_rid, and the
 *   MSIX/MSI/LEGACY_IRQ bits in adapter->feat_en/feat_cap.
 *
 *   return 0 on success, 1 when no supported interrupt scheme exists
 ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int      rid, want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		rid += 4;	/* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
	}
	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev, "Unable to map MSI-X table.\n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = min(mp_ncpus, msgs - 1);
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = min(queues, rss_getnumbuckets());
	if (ixgbe_num_queues > queues) {
		device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	/* Set max queues to 8 when autoconfiguring */
	else
		queues = min(queues, 8);

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSI-X interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		adapter->feat_en |= IXGBE_FEATURE_MSIX;
		return (0);
	}
	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
	pci_release_msi(dev);

msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	/*
	 * NOTE(review): 'rid' is read here uninitialized on the early
	 * goto paths, but only when msix_mem != NULL -- and msix_mem is
	 * only set after rid was assigned above, so the guard keeps the
	 * release safe.
	 */
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	msgs = 1;
	if (pci_alloc_msi(dev, &msgs) == 0) {
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		adapter->link_rid = 1;
		device_printf(adapter->dev, "Using an MSI interrupt\n");
		return (0);
	}

	/* Last resort: INTx, if the hardware generation supports it */
	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		device_printf(adapter->dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	adapter->link_rid = 0;
	device_printf(adapter->dev, "Using a Legacy interrupt\n");

	return (0);
} /* ixgbe_configure_interrupts */


/************************************************************************
 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
 *
 * Done outside of interrupt context since the driver might sleep
************************************************************************/ 5283static void 5284ixgbe_handle_link(void *context) 5285{ 5286 struct adapter *adapter = context; 5287 struct ixgbe_hw *hw = &adapter->hw; 5288 5289 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0); 5290 5291 /* Re-enable link interrupts */ 5292 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC); 5293} /* ixgbe_handle_link */ 5294 5295/************************************************************************ 5296 * ixgbe_rearm_queues 5297 ************************************************************************/ 5298static void 5299ixgbe_rearm_queues(struct adapter *adapter, u64 queues) 5300{ 5301 u32 mask; 5302 5303 switch (adapter->hw.mac.type) { 5304 case ixgbe_mac_82598EB: 5305 mask = (IXGBE_EIMS_RTX_QUEUE & queues); 5306 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 5307 break; 5308 case ixgbe_mac_82599EB: 5309 case ixgbe_mac_X540: 5310 case ixgbe_mac_X550: 5311 case ixgbe_mac_X550EM_x: 5312 case ixgbe_mac_X550EM_a: 5313 mask = (queues & 0xFFFFFFFF); 5314 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 5315 mask = (queues >> 32); 5316 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 5317 break; 5318 default: 5319 break; 5320 } 5321} /* ixgbe_rearm_queues */ 5322 5323