/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
31176667Sjfv 32176667Sjfv******************************************************************************/ 33176667Sjfv/*$FreeBSD$*/ 34176667Sjfv 35194865Sjfv 36253374Sjfv#include "opt_inet.h" 37253374Sjfv#include "opt_inet6.h" 38253374Sjfv 39176667Sjfv#ifdef HAVE_KERNEL_OPTION_HEADERS 40176667Sjfv#include "opt_device_polling.h" 41203354Sjfv#include "opt_altq.h" 42176667Sjfv#endif 43176667Sjfv 44176667Sjfv#include <sys/param.h> 45176667Sjfv#include <sys/systm.h> 46252899Sjfv#ifndef IGB_LEGACY_TX 47194865Sjfv#include <sys/buf_ring.h> 48194865Sjfv#endif 49176667Sjfv#include <sys/bus.h> 50176667Sjfv#include <sys/endian.h> 51176667Sjfv#include <sys/kernel.h> 52176667Sjfv#include <sys/kthread.h> 53176667Sjfv#include <sys/malloc.h> 54176667Sjfv#include <sys/mbuf.h> 55176667Sjfv#include <sys/module.h> 56176667Sjfv#include <sys/rman.h> 57176667Sjfv#include <sys/socket.h> 58176667Sjfv#include <sys/sockio.h> 59176667Sjfv#include <sys/sysctl.h> 60176667Sjfv#include <sys/taskqueue.h> 61181027Sjfv#include <sys/eventhandler.h> 62176667Sjfv#include <sys/pcpu.h> 63194865Sjfv#include <sys/smp.h> 64194865Sjfv#include <machine/smp.h> 65176667Sjfv#include <machine/bus.h> 66176667Sjfv#include <machine/resource.h> 67176667Sjfv 68176667Sjfv#include <net/bpf.h> 69176667Sjfv#include <net/ethernet.h> 70176667Sjfv#include <net/if.h> 71176667Sjfv#include <net/if_arp.h> 72176667Sjfv#include <net/if_dl.h> 73176667Sjfv#include <net/if_media.h> 74176667Sjfv 75176667Sjfv#include <net/if_types.h> 76176667Sjfv#include <net/if_vlan_var.h> 77176667Sjfv 78176667Sjfv#include <netinet/in_systm.h> 79176667Sjfv#include <netinet/in.h> 80176667Sjfv#include <netinet/if_ether.h> 81176667Sjfv#include <netinet/ip.h> 82176667Sjfv#include <netinet/ip6.h> 83176667Sjfv#include <netinet/tcp.h> 84181035Sps#include <netinet/tcp_lro.h> 85176667Sjfv#include <netinet/udp.h> 86176667Sjfv 87176667Sjfv#include <machine/in_cksum.h> 88206001Smarius#include <dev/led/led.h> 89176667Sjfv#include <dev/pci/pcivar.h> 
90176667Sjfv#include <dev/pci/pcireg.h> 91176667Sjfv 92176667Sjfv#include "e1000_api.h" 93176667Sjfv#include "e1000_82575.h" 94176667Sjfv#include "if_igb.h" 95176667Sjfv 96176667Sjfv/********************************************************************* 97176667Sjfv * Set this to one to display debug statistics 98176667Sjfv *********************************************************************/ 99176667Sjfvint igb_display_debug_stats = 0; 100176667Sjfv 101176667Sjfv/********************************************************************* 102176667Sjfv * Driver version: 103176667Sjfv *********************************************************************/ 104252899Sjfvchar igb_driver_version[] = "version - 2.3.10"; 105176667Sjfv 106176667Sjfv 107176667Sjfv/********************************************************************* 108176667Sjfv * PCI Device ID Table 109176667Sjfv * 110176667Sjfv * Used by probe to select devices to load on 111176667Sjfv * Last field stores an index into e1000_strings 112176667Sjfv * Last entry must be all 0s 113176667Sjfv * 114176667Sjfv * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 115176667Sjfv *********************************************************************/ 116176667Sjfv 117176667Sjfvstatic igb_vendor_info_t igb_vendor_info_array[] = 118176667Sjfv{ 119176667Sjfv { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 120176667Sjfv { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, 121176667Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 122176667Sjfv { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, 123176667Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 124181027Sjfv { 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0}, 125190872Sjfv { 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0}, 126200243Sjfv { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 127181027Sjfv { 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 128181027Sjfv { 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 
129194865Sjfv { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD, 130194865Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 131190872Sjfv { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER, 132190872Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 133213234Sjfv { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, 134213234Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 135209611Sjfv { 0x8086, E1000_DEV_ID_82576_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, 136200243Sjfv { 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 137200243Sjfv { 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 138200243Sjfv { 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 139200243Sjfv { 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, 140200243Sjfv { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL, 141200243Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 142215781Sjfv { 0x8086, E1000_DEV_ID_82580_QUAD_FIBER, 143215781Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 144215781Sjfv { 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 145215781Sjfv { 0x8086, E1000_DEV_ID_DH89XXCC_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, 146218530Sjfv { 0x8086, E1000_DEV_ID_DH89XXCC_SFP, PCI_ANY_ID, PCI_ANY_ID, 0}, 147218530Sjfv { 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, 148218530Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 149218530Sjfv { 0x8086, E1000_DEV_ID_I350_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 150218530Sjfv { 0x8086, E1000_DEV_ID_I350_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 151218530Sjfv { 0x8086, E1000_DEV_ID_I350_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 152218530Sjfv { 0x8086, E1000_DEV_ID_I350_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, 153218530Sjfv { 0x8086, E1000_DEV_ID_I350_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, 154238262Sjfv { 0x8086, E1000_DEV_ID_I210_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 155238262Sjfv { 0x8086, E1000_DEV_ID_I210_COPPER_IT, PCI_ANY_ID, PCI_ANY_ID, 0}, 156238262Sjfv { 0x8086, E1000_DEV_ID_I210_COPPER_OEM1, 157238262Sjfv PCI_ANY_ID, PCI_ANY_ID, 0}, 158238262Sjfv { 0x8086, E1000_DEV_ID_I210_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 159238262Sjfv { 0x8086, E1000_DEV_ID_I210_SERDES, 
PCI_ANY_ID, PCI_ANY_ID, 0}, 160238262Sjfv { 0x8086, E1000_DEV_ID_I210_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, 161238262Sjfv { 0x8086, E1000_DEV_ID_I211_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 162176667Sjfv /* required last entry */ 163176667Sjfv { 0, 0, 0, 0, 0} 164176667Sjfv}; 165176667Sjfv 166176667Sjfv/********************************************************************* 167176667Sjfv * Table of branding strings for all supported NICs. 168176667Sjfv *********************************************************************/ 169176667Sjfv 170176667Sjfvstatic char *igb_strings[] = { 171176667Sjfv "Intel(R) PRO/1000 Network Connection" 172176667Sjfv}; 173176667Sjfv 174176667Sjfv/********************************************************************* 175176667Sjfv * Function prototypes 176176667Sjfv *********************************************************************/ 177176667Sjfvstatic int igb_probe(device_t); 178176667Sjfvstatic int igb_attach(device_t); 179176667Sjfvstatic int igb_detach(device_t); 180176667Sjfvstatic int igb_shutdown(device_t); 181176667Sjfvstatic int igb_suspend(device_t); 182176667Sjfvstatic int igb_resume(device_t); 183252899Sjfv#ifndef IGB_LEGACY_TX 184194865Sjfvstatic int igb_mq_start(struct ifnet *, struct mbuf *); 185248292Sjfvstatic int igb_mq_start_locked(struct ifnet *, struct tx_ring *); 186194865Sjfvstatic void igb_qflush(struct ifnet *); 187223198Sjhbstatic void igb_deferred_mq_start(void *, int); 188223198Sjhb#else 189223198Sjhbstatic void igb_start(struct ifnet *); 190223198Sjhbstatic void igb_start_locked(struct tx_ring *, struct ifnet *ifp); 191194865Sjfv#endif 192176667Sjfvstatic int igb_ioctl(struct ifnet *, u_long, caddr_t); 193176667Sjfvstatic void igb_init(void *); 194176667Sjfvstatic void igb_init_locked(struct adapter *); 195176667Sjfvstatic void igb_stop(void *); 196176667Sjfvstatic void igb_media_status(struct ifnet *, struct ifmediareq *); 197176667Sjfvstatic int igb_media_change(struct ifnet *); 198176667Sjfvstatic void 
igb_identify_hardware(struct adapter *); 199176667Sjfvstatic int igb_allocate_pci_resources(struct adapter *); 200176667Sjfvstatic int igb_allocate_msix(struct adapter *); 201176667Sjfvstatic int igb_allocate_legacy(struct adapter *); 202176667Sjfvstatic int igb_setup_msix(struct adapter *); 203176667Sjfvstatic void igb_free_pci_resources(struct adapter *); 204176667Sjfvstatic void igb_local_timer(void *); 205200243Sjfvstatic void igb_reset(struct adapter *); 206211907Syongaristatic int igb_setup_interface(device_t, struct adapter *); 207176667Sjfvstatic int igb_allocate_queues(struct adapter *); 208176667Sjfvstatic void igb_configure_queues(struct adapter *); 209176667Sjfv 210176667Sjfvstatic int igb_allocate_transmit_buffers(struct tx_ring *); 211176667Sjfvstatic void igb_setup_transmit_structures(struct adapter *); 212176667Sjfvstatic void igb_setup_transmit_ring(struct tx_ring *); 213176667Sjfvstatic void igb_initialize_transmit_units(struct adapter *); 214176667Sjfvstatic void igb_free_transmit_structures(struct adapter *); 215176667Sjfvstatic void igb_free_transmit_buffers(struct tx_ring *); 216176667Sjfv 217176667Sjfvstatic int igb_allocate_receive_buffers(struct rx_ring *); 218176667Sjfvstatic int igb_setup_receive_structures(struct adapter *); 219176667Sjfvstatic int igb_setup_receive_ring(struct rx_ring *); 220176667Sjfvstatic void igb_initialize_receive_units(struct adapter *); 221176667Sjfvstatic void igb_free_receive_structures(struct adapter *); 222176667Sjfvstatic void igb_free_receive_buffers(struct rx_ring *); 223203049Sjfvstatic void igb_free_receive_ring(struct rx_ring *); 224176667Sjfv 225176667Sjfvstatic void igb_enable_intr(struct adapter *); 226176667Sjfvstatic void igb_disable_intr(struct adapter *); 227176667Sjfvstatic void igb_update_stats_counters(struct adapter *); 228176667Sjfvstatic bool igb_txeof(struct tx_ring *); 229203049Sjfv 230205869Sjfvstatic __inline void igb_rx_discard(struct rx_ring *, int); 231203049Sjfvstatic __inline void 
igb_rx_input(struct rx_ring *, 232203049Sjfv struct ifnet *, struct mbuf *, u32); 233203049Sjfv 234209238Sjfvstatic bool igb_rxeof(struct igb_queue *, int, int *); 235203049Sjfvstatic void igb_rx_checksum(u32, struct mbuf *, u32); 236229145Smdfstatic bool igb_tx_ctx_setup(struct tx_ring *, struct mbuf *); 237235527Sjfvstatic bool igb_tso_setup(struct tx_ring *, struct mbuf *, int, 238235527Sjfv struct ip *, struct tcphdr *); 239176667Sjfvstatic void igb_set_promisc(struct adapter *); 240176667Sjfvstatic void igb_disable_promisc(struct adapter *); 241176667Sjfvstatic void igb_set_multi(struct adapter *); 242176667Sjfvstatic void igb_update_link_status(struct adapter *); 243205869Sjfvstatic void igb_refresh_mbufs(struct rx_ring *, int); 244194865Sjfv 245181027Sjfvstatic void igb_register_vlan(void *, struct ifnet *, u16); 246181027Sjfvstatic void igb_unregister_vlan(void *, struct ifnet *, u16); 247194865Sjfvstatic void igb_setup_vlan_hw_support(struct adapter *); 248194865Sjfv 249176667Sjfvstatic int igb_xmit(struct tx_ring *, struct mbuf **); 250176667Sjfvstatic int igb_dma_malloc(struct adapter *, bus_size_t, 251176667Sjfv struct igb_dma_alloc *, int); 252176667Sjfvstatic void igb_dma_free(struct adapter *, struct igb_dma_alloc *); 253209241Sgnnstatic int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); 254176667Sjfvstatic void igb_print_nvm_info(struct adapter *); 255176667Sjfvstatic int igb_is_valid_ether_addr(u8 *); 256209611Sjfvstatic void igb_add_hw_stats(struct adapter *); 257209611Sjfv 258209611Sjfvstatic void igb_vf_init_stats(struct adapter *); 259209611Sjfvstatic void igb_update_vf_stats_counters(struct adapter *); 260209611Sjfv 261176667Sjfv/* Management and WOL Support */ 262176667Sjfvstatic void igb_init_manageability(struct adapter *); 263176667Sjfvstatic void igb_release_manageability(struct adapter *); 264176667Sjfvstatic void igb_get_hw_control(struct adapter *); 265176667Sjfvstatic void igb_release_hw_control(struct adapter *); 266176667Sjfvstatic void 
igb_enable_wakeup(device_t); 267206001Smariusstatic void igb_led_func(void *, int); 268176667Sjfv 269176667Sjfvstatic int igb_irq_fast(void *); 270219753Sjfvstatic void igb_msix_que(void *); 271219753Sjfvstatic void igb_msix_link(void *); 272203049Sjfvstatic void igb_handle_que(void *context, int pending); 273203090Sjfvstatic void igb_handle_link(void *context, int pending); 274235527Sjfvstatic void igb_handle_link_locked(struct adapter *); 275176667Sjfv 276219753Sjfvstatic void igb_set_sysctl_value(struct adapter *, const char *, 277219753Sjfv const char *, int *, int); 278220375Sjfvstatic int igb_set_flowcntl(SYSCTL_HANDLER_ARGS); 279223350Sjfvstatic int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS); 280238262Sjfvstatic int igb_sysctl_eee(SYSCTL_HANDLER_ARGS); 281176667Sjfv 282203354Sjfv#ifdef DEVICE_POLLING 283203354Sjfvstatic poll_handler_t igb_poll; 284203354Sjfv#endif /* POLLING */ 285203354Sjfv 286176667Sjfv/********************************************************************* 287176667Sjfv * FreeBSD Device Interface Entry Points 288176667Sjfv *********************************************************************/ 289176667Sjfv 290176667Sjfvstatic device_method_t igb_methods[] = { 291176667Sjfv /* Device interface */ 292176667Sjfv DEVMETHOD(device_probe, igb_probe), 293176667Sjfv DEVMETHOD(device_attach, igb_attach), 294176667Sjfv DEVMETHOD(device_detach, igb_detach), 295176667Sjfv DEVMETHOD(device_shutdown, igb_shutdown), 296176667Sjfv DEVMETHOD(device_suspend, igb_suspend), 297176667Sjfv DEVMETHOD(device_resume, igb_resume), 298248292Sjfv DEVMETHOD_END 299176667Sjfv}; 300176667Sjfv 301176667Sjfvstatic driver_t igb_driver = { 302176667Sjfv "igb", igb_methods, sizeof(struct adapter), 303176667Sjfv}; 304176667Sjfv 305176667Sjfvstatic devclass_t igb_devclass; 306176667SjfvDRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0); 307176667SjfvMODULE_DEPEND(igb, pci, 1, 1, 1); 308176667SjfvMODULE_DEPEND(igb, ether, 1, 1, 1); 309176667Sjfv 
310176667Sjfv/********************************************************************* 311176667Sjfv * Tunable default values. 312176667Sjfv *********************************************************************/ 313176667Sjfv 314235527Sjfvstatic SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters"); 315223676Sjhb 316182416Sjfv/* Descriptor defaults */ 317176667Sjfvstatic int igb_rxd = IGB_DEFAULT_RXD; 318176667Sjfvstatic int igb_txd = IGB_DEFAULT_TXD; 319176667SjfvTUNABLE_INT("hw.igb.rxd", &igb_rxd); 320176667SjfvTUNABLE_INT("hw.igb.txd", &igb_txd); 321223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0, 322223676Sjhb "Number of receive descriptors per queue"); 323223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0, 324223676Sjhb "Number of transmit descriptors per queue"); 325176667Sjfv 326178523Sjfv/* 327203049Sjfv** AIM: Adaptive Interrupt Moderation 328203049Sjfv** which means that the interrupt rate 329203049Sjfv** is varied over time based on the 330203049Sjfv** traffic for that interrupt vector 331182416Sjfv*/ 332182416Sjfvstatic int igb_enable_aim = TRUE; 333182416SjfvTUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim); 334223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RW, &igb_enable_aim, 0, 335223676Sjhb "Enable adaptive interrupt moderation"); 336203049Sjfv 337182416Sjfv/* 338200243Sjfv * MSIX should be the default for best performance, 339200243Sjfv * but this allows it to be forced off for testing. 
340200243Sjfv */ 341200243Sjfvstatic int igb_enable_msix = 1; 342200243SjfvTUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix); 343223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0, 344223676Sjhb "Enable MSI-X interrupts"); 345200243Sjfv 346200243Sjfv/* 347215781Sjfv** Tuneable Interrupt rate 348215781Sjfv*/ 349215781Sjfvstatic int igb_max_interrupt_rate = 8000; 350215781SjfvTUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate); 351223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 352223676Sjhb &igb_max_interrupt_rate, 0, "Maximum interrupts per second"); 353215781Sjfv 354248292Sjfv#if __FreeBSD_version >= 800000 355215781Sjfv/* 356248292Sjfv** Tuneable number of buffers in the buf-ring (drbr_xxx) 357248292Sjfv*/ 358248292Sjfvstatic int igb_buf_ring_size = IGB_BR_SIZE; 359248292SjfvTUNABLE_INT("hw.igb.buf_ring_size", &igb_buf_ring_size); 360248292SjfvSYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN, 361248292Sjfv &igb_buf_ring_size, 0, "Size of the bufring"); 362248292Sjfv#endif 363248292Sjfv 364248292Sjfv/* 365215781Sjfv** Header split causes the packet header to 366215781Sjfv** be dma'd to a seperate mbuf from the payload. 367215781Sjfv** this can have memory alignment benefits. But 368215781Sjfv** another plus is that small packets often fit 369215781Sjfv** into the header and thus use no cluster. Its 370215781Sjfv** a very workload dependent type feature. 371215781Sjfv*/ 372229145Smdfstatic int igb_header_split = FALSE; 373200243SjfvTUNABLE_INT("hw.igb.hdr_split", &igb_header_split); 374223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0, 375223676Sjhb "Enable receive mbuf header split"); 376200243Sjfv 377200243Sjfv/* 378252899Sjfv** This will autoconfigure based on the 379252899Sjfv** number of CPUs and max supported 380252899Sjfv** MSIX messages if left at 0. 
381178523Sjfv*/ 382200243Sjfvstatic int igb_num_queues = 0; 383194865SjfvTUNABLE_INT("hw.igb.num_queues", &igb_num_queues); 384223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0, 385223676Sjhb "Number of queues to configure, 0 indicates autoconfigure"); 386176667Sjfv 387235393Ssbruno/* 388235393Ssbruno** Global variable to store last used CPU when binding queues 389235393Ssbruno** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a 390235393Ssbruno** queue is bound to a cpu. 391235393Ssbruno*/ 392235393Ssbrunostatic int igb_last_bind_cpu = -1; 393235393Ssbruno 394223482Sjfv/* How many packets rxeof tries to clean at a time */ 395223482Sjfvstatic int igb_rx_process_limit = 100; 396223482SjfvTUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit); 397223676SjhbSYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, 398223676Sjhb &igb_rx_process_limit, 0, 399223676Sjhb "Maximum number of received packets to process at a time, -1 means unlimited"); 400223482Sjfv 401235527Sjfv#ifdef DEV_NETMAP /* see ixgbe.c for details */ 402235527Sjfv#include <dev/netmap/if_igb_netmap.h> 403235527Sjfv#endif /* DEV_NETMAP */ 404176667Sjfv/********************************************************************* 405176667Sjfv * Device identification routine 406176667Sjfv * 407176667Sjfv * igb_probe determines if the driver should be loaded on 408176667Sjfv * adapter based on PCI vendor/device id of the adapter. 
409176667Sjfv * 410176667Sjfv * return BUS_PROBE_DEFAULT on success, positive on failure 411176667Sjfv *********************************************************************/ 412176667Sjfv 413176667Sjfvstatic int 414176667Sjfvigb_probe(device_t dev) 415176667Sjfv{ 416176667Sjfv char adapter_name[60]; 417176667Sjfv uint16_t pci_vendor_id = 0; 418176667Sjfv uint16_t pci_device_id = 0; 419176667Sjfv uint16_t pci_subvendor_id = 0; 420176667Sjfv uint16_t pci_subdevice_id = 0; 421176667Sjfv igb_vendor_info_t *ent; 422176667Sjfv 423176667Sjfv INIT_DEBUGOUT("igb_probe: begin"); 424176667Sjfv 425176667Sjfv pci_vendor_id = pci_get_vendor(dev); 426176667Sjfv if (pci_vendor_id != IGB_VENDOR_ID) 427176667Sjfv return (ENXIO); 428176667Sjfv 429176667Sjfv pci_device_id = pci_get_device(dev); 430176667Sjfv pci_subvendor_id = pci_get_subvendor(dev); 431176667Sjfv pci_subdevice_id = pci_get_subdevice(dev); 432176667Sjfv 433176667Sjfv ent = igb_vendor_info_array; 434176667Sjfv while (ent->vendor_id != 0) { 435176667Sjfv if ((pci_vendor_id == ent->vendor_id) && 436176667Sjfv (pci_device_id == ent->device_id) && 437176667Sjfv 438176667Sjfv ((pci_subvendor_id == ent->subvendor_id) || 439176667Sjfv (ent->subvendor_id == PCI_ANY_ID)) && 440176667Sjfv 441176667Sjfv ((pci_subdevice_id == ent->subdevice_id) || 442176667Sjfv (ent->subdevice_id == PCI_ANY_ID))) { 443176667Sjfv sprintf(adapter_name, "%s %s", 444176667Sjfv igb_strings[ent->index], 445176667Sjfv igb_driver_version); 446176667Sjfv device_set_desc_copy(dev, adapter_name); 447176667Sjfv return (BUS_PROBE_DEFAULT); 448176667Sjfv } 449176667Sjfv ent++; 450176667Sjfv } 451176667Sjfv 452176667Sjfv return (ENXIO); 453176667Sjfv} 454176667Sjfv 455176667Sjfv/********************************************************************* 456176667Sjfv * Device initialization routine 457176667Sjfv * 458176667Sjfv * The attach entry point is called when the driver is being loaded. 
459176667Sjfv * This routine identifies the type of hardware, allocates all resources 460176667Sjfv * and initializes the hardware. 461176667Sjfv * 462176667Sjfv * return 0 on success, positive on failure 463176667Sjfv *********************************************************************/ 464176667Sjfv 465176667Sjfvstatic int 466176667Sjfvigb_attach(device_t dev) 467176667Sjfv{ 468176667Sjfv struct adapter *adapter; 469176667Sjfv int error = 0; 470176667Sjfv u16 eeprom_data; 471176667Sjfv 472176667Sjfv INIT_DEBUGOUT("igb_attach: begin"); 473176667Sjfv 474223350Sjfv if (resource_disabled("igb", device_get_unit(dev))) { 475223350Sjfv device_printf(dev, "Disabled by device hint\n"); 476223350Sjfv return (ENXIO); 477223350Sjfv } 478223350Sjfv 479176667Sjfv adapter = device_get_softc(dev); 480176667Sjfv adapter->dev = adapter->osdep.dev = dev; 481176667Sjfv IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); 482176667Sjfv 483176667Sjfv /* SYSCTL stuff */ 484176667Sjfv SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 485176667Sjfv SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 486209241Sgnn OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 487209241Sgnn igb_sysctl_nvm_info, "I", "NVM Information"); 488176667Sjfv 489223676Sjhb igb_set_sysctl_value(adapter, "enable_aim", 490223676Sjhb "Interrupt Moderation", &adapter->enable_aim, 491223676Sjhb igb_enable_aim); 492182416Sjfv 493220375Sjfv SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 494220375Sjfv SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 495223350Sjfv OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, 496220375Sjfv adapter, 0, igb_set_flowcntl, "I", "Flow Control"); 497220375Sjfv 498176667Sjfv callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); 499176667Sjfv 500176667Sjfv /* Determine hardware and mac info */ 501176667Sjfv igb_identify_hardware(adapter); 502176667Sjfv 503176667Sjfv /* Setup PCI resources */ 504176667Sjfv if (igb_allocate_pci_resources(adapter)) { 505176667Sjfv device_printf(dev, "Allocation of PCI resources 
failed\n"); 506176667Sjfv error = ENXIO; 507176667Sjfv goto err_pci; 508176667Sjfv } 509176667Sjfv 510176667Sjfv /* Do Shared Code initialization */ 511176667Sjfv if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { 512176667Sjfv device_printf(dev, "Setup of Shared code failed\n"); 513176667Sjfv error = ENXIO; 514176667Sjfv goto err_pci; 515176667Sjfv } 516176667Sjfv 517176667Sjfv e1000_get_bus_info(&adapter->hw); 518176667Sjfv 519219753Sjfv /* Sysctl for limiting the amount of work done in the taskqueue */ 520219753Sjfv igb_set_sysctl_value(adapter, "rx_processing_limit", 521223350Sjfv "max number of rx packets to process", 522223482Sjfv &adapter->rx_process_limit, igb_rx_process_limit); 523176667Sjfv 524176667Sjfv /* 525176667Sjfv * Validate number of transmit and receive descriptors. It 526176667Sjfv * must not exceed hardware maximum, and must be multiple 527176667Sjfv * of E1000_DBA_ALIGN. 528176667Sjfv */ 529176667Sjfv if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 || 530176667Sjfv (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) { 531176667Sjfv device_printf(dev, "Using %d TX descriptors instead of %d!\n", 532176667Sjfv IGB_DEFAULT_TXD, igb_txd); 533176667Sjfv adapter->num_tx_desc = IGB_DEFAULT_TXD; 534176667Sjfv } else 535176667Sjfv adapter->num_tx_desc = igb_txd; 536176667Sjfv if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 || 537176667Sjfv (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) { 538176667Sjfv device_printf(dev, "Using %d RX descriptors instead of %d!\n", 539176667Sjfv IGB_DEFAULT_RXD, igb_rxd); 540176667Sjfv adapter->num_rx_desc = IGB_DEFAULT_RXD; 541176667Sjfv } else 542176667Sjfv adapter->num_rx_desc = igb_rxd; 543176667Sjfv 544176667Sjfv adapter->hw.mac.autoneg = DO_AUTO_NEG; 545176667Sjfv adapter->hw.phy.autoneg_wait_to_complete = FALSE; 546176667Sjfv adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 547176667Sjfv 548176667Sjfv /* Copper options */ 549176667Sjfv if 
(adapter->hw.phy.media_type == e1000_media_type_copper) { 550176667Sjfv adapter->hw.phy.mdix = AUTO_ALL_MODES; 551176667Sjfv adapter->hw.phy.disable_polarity_correction = FALSE; 552176667Sjfv adapter->hw.phy.ms_type = IGB_MASTER_SLAVE; 553176667Sjfv } 554176667Sjfv 555176667Sjfv /* 556176667Sjfv * Set the frame limits assuming 557176667Sjfv * standard ethernet sized frames. 558176667Sjfv */ 559176667Sjfv adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; 560176667Sjfv adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; 561176667Sjfv 562176667Sjfv /* 563176667Sjfv ** Allocate and Setup Queues 564176667Sjfv */ 565176667Sjfv if (igb_allocate_queues(adapter)) { 566176667Sjfv error = ENOMEM; 567182416Sjfv goto err_pci; 568176667Sjfv } 569176667Sjfv 570209611Sjfv /* Allocate the appropriate stats memory */ 571218530Sjfv if (adapter->vf_ifp) { 572209611Sjfv adapter->stats = 573209611Sjfv (struct e1000_vf_stats *)malloc(sizeof \ 574209611Sjfv (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO); 575209611Sjfv igb_vf_init_stats(adapter); 576209611Sjfv } else 577209611Sjfv adapter->stats = 578209611Sjfv (struct e1000_hw_stats *)malloc(sizeof \ 579209611Sjfv (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO); 580211906Syongari if (adapter->stats == NULL) { 581211906Syongari device_printf(dev, "Can not allocate stats memory\n"); 582211906Syongari error = ENOMEM; 583211906Syongari goto err_late; 584211906Syongari } 585209611Sjfv 586211913Syongari /* Allocate multicast array memory. 
*/ 587211913Syongari adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * 588211913Syongari MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); 589211913Syongari if (adapter->mta == NULL) { 590211913Syongari device_printf(dev, "Can not allocate multicast setup array\n"); 591211913Syongari error = ENOMEM; 592211913Syongari goto err_late; 593211913Syongari } 594211913Syongari 595219753Sjfv /* Some adapter-specific advanced features */ 596219753Sjfv if (adapter->hw.mac.type >= e1000_i350) { 597223350Sjfv SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 598223350Sjfv SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 599223350Sjfv OID_AUTO, "dmac", CTLTYPE_INT|CTLFLAG_RW, 600223350Sjfv adapter, 0, igb_sysctl_dmac, "I", "DMA Coalesce"); 601238262Sjfv SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 602238262Sjfv SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 603238262Sjfv OID_AUTO, "eee_disabled", CTLTYPE_INT|CTLFLAG_RW, 604238262Sjfv adapter, 0, igb_sysctl_eee, "I", 605238262Sjfv "Disable Energy Efficient Ethernet"); 606238262Sjfv if (adapter->hw.phy.media_type == e1000_media_type_copper) 607238262Sjfv e1000_set_eee_i350(&adapter->hw); 608219753Sjfv } 609219753Sjfv 610190872Sjfv /* 611190872Sjfv ** Start from a known state, this is 612190872Sjfv ** important in reading the nvm and 613190872Sjfv ** mac from that. 614190872Sjfv */ 615190872Sjfv e1000_reset_hw(&adapter->hw); 616190872Sjfv 617176667Sjfv /* Make sure we have a good EEPROM before we read from it */ 618238262Sjfv if (((adapter->hw.mac.type != e1000_i210) && 619238262Sjfv (adapter->hw.mac.type != e1000_i211)) && 620238262Sjfv (e1000_validate_nvm_checksum(&adapter->hw) < 0)) { 621176667Sjfv /* 622176667Sjfv ** Some PCI-E parts fail the first check due to 623176667Sjfv ** the link being in sleep state, call it again, 624176667Sjfv ** if it fails a second time its a real issue. 
625176667Sjfv */ 626176667Sjfv if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { 627176667Sjfv device_printf(dev, 628176667Sjfv "The EEPROM Checksum Is Not Valid\n"); 629176667Sjfv error = EIO; 630176667Sjfv goto err_late; 631176667Sjfv } 632176667Sjfv } 633176667Sjfv 634190872Sjfv /* 635190872Sjfv ** Copy the permanent MAC address out of the EEPROM 636190872Sjfv */ 637176667Sjfv if (e1000_read_mac_addr(&adapter->hw) < 0) { 638176667Sjfv device_printf(dev, "EEPROM read error while reading MAC" 639176667Sjfv " address\n"); 640176667Sjfv error = EIO; 641176667Sjfv goto err_late; 642176667Sjfv } 643190872Sjfv /* Check its sanity */ 644176667Sjfv if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) { 645176667Sjfv device_printf(dev, "Invalid MAC address\n"); 646176667Sjfv error = EIO; 647176667Sjfv goto err_late; 648176667Sjfv } 649176667Sjfv 650176667Sjfv /* Setup OS specific network interface */ 651211907Syongari if (igb_setup_interface(dev, adapter) != 0) 652211907Syongari goto err_late; 653176667Sjfv 654200243Sjfv /* Now get a good starting state */ 655200243Sjfv igb_reset(adapter); 656200243Sjfv 657176667Sjfv /* Initialize statistics */ 658176667Sjfv igb_update_stats_counters(adapter); 659176667Sjfv 660176667Sjfv adapter->hw.mac.get_link_status = 1; 661176667Sjfv igb_update_link_status(adapter); 662176667Sjfv 663176667Sjfv /* Indicate SOL/IDER usage */ 664176667Sjfv if (e1000_check_reset_block(&adapter->hw)) 665176667Sjfv device_printf(dev, 666176667Sjfv "PHY reset is blocked due to SOL/IDER session.\n"); 667176667Sjfv 668176667Sjfv /* Determine if we have to control management hardware */ 669176667Sjfv adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); 670176667Sjfv 671176667Sjfv /* 672176667Sjfv * Setup Wake-on-Lan 673176667Sjfv */ 674176667Sjfv /* APME bit in EEPROM is mapped to WUC.APME */ 675176667Sjfv eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME; 676176667Sjfv if (eeprom_data) 677176667Sjfv adapter->wol = 
E1000_WUFC_MAG; 678176667Sjfv 679181027Sjfv /* Register for VLAN events */ 680181027Sjfv adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 681195857Sjfv igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); 682181027Sjfv adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 683195857Sjfv igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 684181027Sjfv 685209241Sgnn igb_add_hw_stats(adapter); 686209241Sgnn 687176667Sjfv /* Tell the stack that the interface is not active */ 688235527Sjfv adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 689235527Sjfv adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE; 690176667Sjfv 691206001Smarius adapter->led_dev = led_create(igb_led_func, adapter, 692206001Smarius device_get_nameunit(dev)); 693206001Smarius 694235527Sjfv /* 695235527Sjfv ** Configure Interrupts 696235527Sjfv */ 697235527Sjfv if ((adapter->msix > 1) && (igb_enable_msix)) 698235527Sjfv error = igb_allocate_msix(adapter); 699235527Sjfv else /* MSI or Legacy */ 700235527Sjfv error = igb_allocate_legacy(adapter); 701235527Sjfv if (error) 702235527Sjfv goto err_late; 703235527Sjfv 704235527Sjfv#ifdef DEV_NETMAP 705235527Sjfv igb_netmap_attach(adapter); 706235527Sjfv#endif /* DEV_NETMAP */ 707176667Sjfv INIT_DEBUGOUT("igb_attach: end"); 708176667Sjfv 709176667Sjfv return (0); 710176667Sjfv 711176667Sjfverr_late: 712223350Sjfv igb_detach(dev); 713176667Sjfv igb_free_transmit_structures(adapter); 714176667Sjfv igb_free_receive_structures(adapter); 715176667Sjfv igb_release_hw_control(adapter); 716235527Sjfverr_pci: 717235527Sjfv igb_free_pci_resources(adapter); 718211907Syongari if (adapter->ifp != NULL) 719211907Syongari if_free(adapter->ifp); 720211913Syongari free(adapter->mta, M_DEVBUF); 721176667Sjfv IGB_CORE_LOCK_DESTROY(adapter); 722176667Sjfv 723176667Sjfv return (error); 724176667Sjfv} 725176667Sjfv 726176667Sjfv/********************************************************************* 727176667Sjfv * Device removal routine 728176667Sjfv * 729176667Sjfv * 
The detach entry point is called when the driver is being removed. 730176667Sjfv * This routine stops the adapter and deallocates all the resources 731176667Sjfv * that were allocated for driver operation. 732176667Sjfv * 733176667Sjfv * return 0 on success, positive on failure 734176667Sjfv *********************************************************************/ 735176667Sjfv 736176667Sjfvstatic int 737176667Sjfvigb_detach(device_t dev) 738176667Sjfv{ 739176667Sjfv struct adapter *adapter = device_get_softc(dev); 740176667Sjfv struct ifnet *ifp = adapter->ifp; 741176667Sjfv 742176667Sjfv INIT_DEBUGOUT("igb_detach: begin"); 743176667Sjfv 744176667Sjfv /* Make sure VLANS are not using driver */ 745176667Sjfv if (adapter->ifp->if_vlantrunk != NULL) { 746176667Sjfv device_printf(dev,"Vlan in use, detach first\n"); 747176667Sjfv return (EBUSY); 748176667Sjfv } 749176667Sjfv 750223198Sjhb ether_ifdetach(adapter->ifp); 751223198Sjhb 752206001Smarius if (adapter->led_dev != NULL) 753206001Smarius led_destroy(adapter->led_dev); 754206001Smarius 755203354Sjfv#ifdef DEVICE_POLLING 756203354Sjfv if (ifp->if_capenable & IFCAP_POLLING) 757203354Sjfv ether_poll_deregister(ifp); 758203354Sjfv#endif 759203354Sjfv 760176667Sjfv IGB_CORE_LOCK(adapter); 761176667Sjfv adapter->in_detach = 1; 762176667Sjfv igb_stop(adapter); 763176667Sjfv IGB_CORE_UNLOCK(adapter); 764176667Sjfv 765176667Sjfv e1000_phy_hw_reset(&adapter->hw); 766176667Sjfv 767176667Sjfv /* Give control back to firmware */ 768176667Sjfv igb_release_manageability(adapter); 769176667Sjfv igb_release_hw_control(adapter); 770176667Sjfv 771176667Sjfv if (adapter->wol) { 772176667Sjfv E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); 773176667Sjfv E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); 774176667Sjfv igb_enable_wakeup(dev); 775176667Sjfv } 776176667Sjfv 777181027Sjfv /* Unregister VLAN events */ 778181027Sjfv if (adapter->vlan_attach != NULL) 779181027Sjfv EVENTHANDLER_DEREGISTER(vlan_config, 
adapter->vlan_attach); 780181027Sjfv if (adapter->vlan_detach != NULL) 781181027Sjfv EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); 782181027Sjfv 783176667Sjfv callout_drain(&adapter->timer); 784176667Sjfv 785235527Sjfv#ifdef DEV_NETMAP 786235527Sjfv netmap_detach(adapter->ifp); 787235527Sjfv#endif /* DEV_NETMAP */ 788176667Sjfv igb_free_pci_resources(adapter); 789176667Sjfv bus_generic_detach(dev); 790176667Sjfv if_free(ifp); 791176667Sjfv 792176667Sjfv igb_free_transmit_structures(adapter); 793176667Sjfv igb_free_receive_structures(adapter); 794223350Sjfv if (adapter->mta != NULL) 795223350Sjfv free(adapter->mta, M_DEVBUF); 796176667Sjfv 797176667Sjfv IGB_CORE_LOCK_DESTROY(adapter); 798176667Sjfv 799176667Sjfv return (0); 800176667Sjfv} 801176667Sjfv 802176667Sjfv/********************************************************************* 803176667Sjfv * 804176667Sjfv * Shutdown entry point 805176667Sjfv * 806176667Sjfv **********************************************************************/ 807176667Sjfv 808176667Sjfvstatic int 809176667Sjfvigb_shutdown(device_t dev) 810176667Sjfv{ 811176667Sjfv return igb_suspend(dev); 812176667Sjfv} 813176667Sjfv 814176667Sjfv/* 815176667Sjfv * Suspend/resume device methods. 
 */
/*
 * igb_suspend - device suspend method
 *
 * Stops the adapter, returns control of the hardware to the
 * firmware, and arms Wake On LAN state if configured, then lets
 * the bus layer complete the suspend.
 */
static int
igb_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	IGB_CORE_LOCK(adapter);

	igb_stop(adapter);

	igb_release_manageability(adapter);
	igb_release_hw_control(adapter);

	/* Arm wakeup before the device loses power */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		igb_enable_wakeup(dev);
	}

	IGB_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}

/*
 * igb_resume - device resume method
 *
 * Reinitializes the hardware and, if the interface was up with an
 * active link, restarts transmission on every queue.
 */
static int
igb_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct tx_ring	*txr = adapter->tx_rings;
	struct ifnet *ifp = adapter->ifp;

	IGB_CORE_LOCK(adapter);
	igb_init_locked(adapter);
	igb_init_manageability(adapter);

	if ((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
		for (int i = 0; i < adapter->num_queues; i++, txr++) {
			IGB_TX_LOCK(txr);
#ifndef IGB_LEGACY_TX
			/* Process the stack queue only if not depleted */
			if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
			    !drbr_empty(ifp, txr->br))
				igb_mq_start_locked(ifp, txr);
#else
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				igb_start_locked(txr, ifp);
#endif
			IGB_TX_UNLOCK(txr);
		}
	}
	IGB_CORE_UNLOCK(adapter);

	return bus_generic_resume(dev);
}


#ifdef IGB_LEGACY_TX

/*********************************************************************
 *  Transmit entry point
 *
 *  igb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/

/* Drain the interface send queue onto a TX ring; TX lock must be held. */
static void
igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	IGB_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/* Call cleanup if number of TX descriptors low */
	if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
		igb_txeof(txr);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		/* Not enough descriptors for a worst-case frame: stop */
		if (txr->tx_avail <= IGB_MAX_SCATTER) {
			txr->queue_status |= IGB_QUEUE_DEPLETED;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (igb_xmit(txr, &m_head)) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			if (txr->tx_avail <= IGB_MAX_SCATTER)
				txr->queue_status |= IGB_QUEUE_DEPLETED;
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_time = ticks;
		txr->queue_status |= IGB_QUEUE_WORKING;
	}
}

/*
 * Legacy TX driver routine, called from the
 * stack, always uses tx[0], and spins for it.
 * Should not be used with multiqueue tx
 */
static void
igb_start(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IGB_TX_LOCK(txr);
		igb_start_locked(txr, ifp);
		IGB_TX_UNLOCK(txr);
	}
	return;
}

#else /* ~IGB_LEGACY_TX */

/*
** Multiqueue Transmit Entry:
**  quick turnaround to the stack
**
*/
static int
igb_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter		*adapter = ifp->if_softc;
	struct igb_queue	*que;
	struct tx_ring		*txr;
	int 			i, err = 0;

	/* Which queue to use: flow id if the stack set one, else CPU */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;
	else
		i = curcpu % adapter->num_queues;
	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	/* Always enqueue first, then try to drain the ring inline */
	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IGB_TX_TRYLOCK(txr)) {
		err = igb_mq_start_locked(ifp, txr);
		IGB_TX_UNLOCK(txr);
	} else
		/* Ring busy: defer the drain to the queue's taskqueue */
		taskqueue_enqueue(que->tq, &txr->txq_task);

	return (err);
}

/*
 * igb_mq_start_locked - drain the buf_ring onto the descriptor ring
 *
 * Caller holds the TX lock.  Returns 0, ENETDOWN when the interface
 * is down or has no link, or the igb_xmit() error that stopped the
 * drain.
 */
static int
igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		err = 0, enq;

	IGB_TX_LOCK_ASSERT(txr);

	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
	    adapter->link_active == 0)
		return (ENETDOWN);

	enq = 0;

	/* Process the queue */
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = igb_xmit(txr, &next)) != 0) {
			if (next == NULL) {
				/* It was freed, move forward */
				drbr_advance(ifp, txr->br);
			} else {
				/*
				 * Still have one left, it may not be
				 * the same since the transmit function
				 * may have changed it.
				 */
				drbr_putback(ifp, txr->br, next);
			}
			break;
		}
		drbr_advance(ifp, txr->br);
		enq++;
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}
	if (enq > 0) {
		/* Set the watchdog */
		txr->queue_status |= IGB_QUEUE_WORKING;
		txr->watchdog_time = ticks;
	}
	/* Reclaim completed descriptors; note depletion for other paths */
	if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
		igb_txeof(txr);
	if (txr->tx_avail <= IGB_MAX_SCATTER)
		txr->queue_status |= IGB_QUEUE_DEPLETED;
	return (err);
}

/*
 * Called from a taskqueue to drain queued transmit packets.
1040223198Sjhb */ 1041223198Sjhbstatic void 1042223198Sjhbigb_deferred_mq_start(void *arg, int pending) 1043223198Sjhb{ 1044223198Sjhb struct tx_ring *txr = arg; 1045223198Sjhb struct adapter *adapter = txr->adapter; 1046223198Sjhb struct ifnet *ifp = adapter->ifp; 1047223198Sjhb 1048223198Sjhb IGB_TX_LOCK(txr); 1049223198Sjhb if (!drbr_empty(ifp, txr->br)) 1050248292Sjfv igb_mq_start_locked(ifp, txr); 1051223198Sjhb IGB_TX_UNLOCK(txr); 1052223198Sjhb} 1053223198Sjhb 1054223198Sjhb/* 1055194865Sjfv** Flush all ring buffers 1056194865Sjfv*/ 1057194865Sjfvstatic void 1058194865Sjfvigb_qflush(struct ifnet *ifp) 1059194865Sjfv{ 1060194865Sjfv struct adapter *adapter = ifp->if_softc; 1061194865Sjfv struct tx_ring *txr = adapter->tx_rings; 1062194865Sjfv struct mbuf *m; 1063194865Sjfv 1064194865Sjfv for (int i = 0; i < adapter->num_queues; i++, txr++) { 1065194865Sjfv IGB_TX_LOCK(txr); 1066194865Sjfv while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) 1067194865Sjfv m_freem(m); 1068194865Sjfv IGB_TX_UNLOCK(txr); 1069194865Sjfv } 1070194865Sjfv if_qflush(ifp); 1071194865Sjfv} 1072252899Sjfv#endif /* ~IGB_LEGACY_TX */ 1073194865Sjfv 1074176667Sjfv/********************************************************************* 1075176667Sjfv * Ioctl entry point 1076176667Sjfv * 1077176667Sjfv * igb_ioctl is called when the user wants to configure the 1078176667Sjfv * interface. 
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	bool		avoid_reset = FALSE;
	int		error = 0;

	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				igb_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		IGB_CORE_LOCK(adapter);
		/* 9234 = largest jumbo frame the hardware supports */
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			IGB_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		igb_init_locked(adapter);
		IGB_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		IGB_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* Only promisc/allmulti changed: no reinit */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(adapter);
					igb_set_promisc(adapter);
				}
			} else
				igb_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				igb_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IGB_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IGB_CORE_LOCK(adapter);
			igb_disable_intr(adapter);
			igb_set_multi(adapter);
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				igb_enable_intr(adapter);
			IGB_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		IGB_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			IGB_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		IGB_CORE_UNLOCK(adapter);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(igb_poll, ifp);
				if (error)
					return (error);
				IGB_CORE_LOCK(adapter);
				igb_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				IGB_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				IGB_CORE_LOCK(adapter);
				igb_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				IGB_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTSO) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
			reinit = 1;
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}
		/* Reinitialize only if the interface is already running */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			igb_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  Returns void; an RX setup failure is reported via device_printf
 *  and the routine bails out early.
 **********************************************************************/

static void
igb_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;

	INIT_DEBUGOUT("igb_init: begin");

	IGB_CORE_LOCK_ASSERT(adapter);

	/* Quiesce before touching the hardware */
	igb_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
	      ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	igb_reset(adapter);
	igb_update_link_status(adapter);

	/* Program the VLAN ethertype register */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		if (adapter->hw.mac.type == e1000_82576)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	/* Configure for OS presence */
	igb_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	igb_setup_transmit_structures(adapter);
	igb_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	igb_set_multi(adapter);

	/*
	** Figure out the desired mbuf pool
	** for doing jumbo/packetsplit
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;

	/* Prepare receive descriptors and buffers */
	if (igb_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		return;
	}
	igb_initialize_receive_units(adapter);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_setup_vlan_hw_support(adapter);

	/* Don't lose promiscuous settings */
	igb_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	if (adapter->msix > 1) /* Set up queue routing */
		igb_configure_queues(adapter);

	/* this clears any pending interrupts */
	E1000_READ_REG(&adapter->hw, E1000_ICR);
#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		igb_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
	{
		igb_enable_intr(adapter);
		/* Force an initial link-state interrupt */
		E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		e1000_set_eee_i350(&adapter->hw);
}

/* Locked wrapper used as the stack's if_init entry point. */
static void
igb_init(void *arg)
{
	struct adapter *adapter = arg;

	IGB_CORE_LOCK(adapter);
	igb_init_locked(adapter);
	IGB_CORE_UNLOCK(adapter);
}


/*
 * Taskqueue handler for a queue pair: completes TX, pulls RX,
 * restarts transmission, and requeues itself while RX work remains;
 * otherwise re-enables the queue's interrupt.
 */
static void
igb_handle_que(void *context, int pending)
{
	struct igb_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		bool	more;

		more = igb_rxeof(que, adapter->rx_process_limit, NULL);

		IGB_TX_LOCK(txr);
		igb_txeof(txr);
#ifndef IGB_LEGACY_TX
		/* Process the stack queue only if not depleted */
		if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
		    !drbr_empty(ifp, txr->br))
			igb_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			igb_start_locked(txr, ifp);
#endif
		IGB_TX_UNLOCK(txr);
		/* Do we need another?
		 */
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		return;
#endif
	/* Reenable this interrupt */
	if (que->eims)
		E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
	else
		igb_enable_intr(adapter);
}

/* Deal with link in a sleepable context */
static void
igb_handle_link(void *context, int pending)
{
	struct adapter *adapter = context;

	IGB_CORE_LOCK(adapter);
	igb_handle_link_locked(adapter);
	IGB_CORE_UNLOCK(adapter);
}

/*
 * Refresh link state; if the interface is running with link up,
 * kick transmission on every queue.  Core lock must be held.
 */
static void
igb_handle_link_locked(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ifnet *ifp = adapter->ifp;

	IGB_CORE_LOCK_ASSERT(adapter);
	adapter->hw.mac.get_link_status = 1;
	igb_update_link_status(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
		for (int i = 0; i < adapter->num_queues; i++, txr++) {
			IGB_TX_LOCK(txr);
#ifndef IGB_LEGACY_TX
			/* Process the stack queue only if not depleted */
			if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
			    !drbr_empty(ifp, txr->br))
				igb_mq_start_locked(ifp, txr);
#else
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				igb_start_locked(txr, ifp);
#endif
			IGB_TX_UNLOCK(txr);
		}
	}
}

/*********************************************************************
 *
 *  MSI/Legacy Deferred
 *  Interrupt Service routine
 *
 *  Fast filter handler: decides whether the interrupt is ours and
 *  defers all real work to the queue taskqueue.
 *
 *********************************************************************/
static int
igb_irq_fast(void *arg)
{
	struct adapter		*adapter = arg;
	struct igb_queue	*que = adapter->queues;
	u32			reg_icr;


	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	igb_disable_intr(adapter);
	taskqueue_enqueue(que->tq, &que->que_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
		taskqueue_enqueue(que->tq, &adapter->link_task);

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}

#ifdef DEVICE_POLLING
/*
 * Polling entry point.  POLL_RETURN_COUNT hides the return-type
 * difference: polling handlers return the RX count on 8.x and
 * later, void before that.
 */
#if __FreeBSD_version >= 800000
#define POLL_RETURN_COUNT(a) (a)
static int
#else
#define POLL_RETURN_COUNT(a)
static void
#endif
igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter		*adapter = ifp->if_softc;
	struct igb_queue	*que;
	struct tx_ring		*txr;
	u32			reg_icr, rx_done = 0;
	u32			loop = IGB_MAX_LOOP;
	bool			more;

	IGB_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		IGB_CORE_UNLOCK(adapter);
		return POLL_RETURN_COUNT(rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
			igb_handle_link_locked(adapter);

		if (reg_icr & E1000_ICR_RXO)
			adapter->rx_overruns++;
	}
	IGB_CORE_UNLOCK(adapter);

	/* Service every queue pair: RX first, then TX completion/restart */
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		txr = que->txr;

		igb_rxeof(que, count, &rx_done);

		IGB_TX_LOCK(txr);
		do {
			more = igb_txeof(txr);
		} while (loop-- && more);
#ifndef IGB_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			igb_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			igb_start_locked(txr, ifp);
#endif
		IGB_TX_UNLOCK(txr);
	}

	return POLL_RETURN_COUNT(rx_done);
}
#endif /* DEVICE_POLLING */

/*********************************************************************
 *
 *  MSIX Que Interrupt Service routine
 *
 **********************************************************************/
static void
igb_msix_que(void *arg)
{
	struct igb_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet   *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	u32		newitr = 0;
	bool		more_rx;

	/* Ignore spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Mask this queue's interrupt while servicing it */
	E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
	++que->irqs;

	IGB_TX_LOCK(txr);
	igb_txeof(txr);
#ifndef IGB_LEGACY_TX
	/* Process the stack queue only if not depleted */
	if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
	    !drbr_empty(ifp, txr->br))
		igb_mq_start_locked(ifp, txr);
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		igb_start_locked(txr, ifp);
#endif
	IGB_TX_UNLOCK(txr);

more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL); 1602176667Sjfv 1603223676Sjhb if (adapter->enable_aim == FALSE) 1604203049Sjfv goto no_calc; 1605203049Sjfv /* 1606203049Sjfv ** Do Adaptive Interrupt Moderation: 1607203049Sjfv ** - Write out last calculated setting 1608203049Sjfv ** - Calculate based on average size over 1609203049Sjfv ** the last interval. 1610203049Sjfv */ 1611203049Sjfv if (que->eitr_setting) 1612203049Sjfv E1000_WRITE_REG(&adapter->hw, 1613203049Sjfv E1000_EITR(que->msix), que->eitr_setting); 1614203049Sjfv 1615203049Sjfv que->eitr_setting = 0; 1616176667Sjfv 1617203049Sjfv /* Idle, do nothing */ 1618203049Sjfv if ((txr->bytes == 0) && (rxr->bytes == 0)) 1619203049Sjfv goto no_calc; 1620203049Sjfv 1621203049Sjfv /* Used half Default if sub-gig */ 1622203049Sjfv if (adapter->link_speed != 1000) 1623203049Sjfv newitr = IGB_DEFAULT_ITR / 2; 1624203049Sjfv else { 1625203049Sjfv if ((txr->bytes) && (txr->packets)) 1626203049Sjfv newitr = txr->bytes/txr->packets; 1627203049Sjfv if ((rxr->bytes) && (rxr->packets)) 1628203049Sjfv newitr = max(newitr, 1629203049Sjfv (rxr->bytes / rxr->packets)); 1630203049Sjfv newitr += 24; /* account for hardware frame, crc */ 1631203049Sjfv /* set an upper boundary */ 1632203049Sjfv newitr = min(newitr, 3000); 1633203049Sjfv /* Be nice to the mid range */ 1634203049Sjfv if ((newitr > 300) && (newitr < 1200)) 1635203049Sjfv newitr = (newitr / 3); 1636203049Sjfv else 1637203049Sjfv newitr = (newitr / 2); 1638203049Sjfv } 1639203049Sjfv newitr &= 0x7FFC; /* Mask invalid bits */ 1640203049Sjfv if (adapter->hw.mac.type == e1000_82575) 1641203049Sjfv newitr |= newitr << 16; 1642203049Sjfv else 1643206430Sjfv newitr |= E1000_EITR_CNT_IGNR; 1644203049Sjfv 1645203049Sjfv /* save for next interrupt */ 1646203049Sjfv que->eitr_setting = newitr; 1647176667Sjfv 1648203049Sjfv /* Reset state */ 1649203049Sjfv txr->bytes = 0; 1650203049Sjfv txr->packets = 0; 1651203049Sjfv rxr->bytes = 0; 1652203049Sjfv rxr->packets = 0; 
1653200243Sjfv 1654203049Sjfvno_calc: 1655203049Sjfv /* Schedule a clean task if needed*/ 1656235527Sjfv if (more_rx) 1657203049Sjfv taskqueue_enqueue(que->tq, &que->que_task); 1658200243Sjfv else 1659200243Sjfv /* Reenable this interrupt */ 1660203049Sjfv E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); 1661176667Sjfv return; 1662176667Sjfv} 1663176667Sjfv 1664182416Sjfv 1665190872Sjfv/********************************************************************* 1666190872Sjfv * 1667190872Sjfv * MSIX Link Interrupt Service routine 1668190872Sjfv * 1669190872Sjfv **********************************************************************/ 1670190872Sjfv 1671190872Sjfvstatic void 1672190872Sjfvigb_msix_link(void *arg) 1673190872Sjfv{ 1674190872Sjfv struct adapter *adapter = arg; 1675190872Sjfv u32 icr; 1676190872Sjfv 1677190872Sjfv ++adapter->link_irq; 1678190872Sjfv icr = E1000_READ_REG(&adapter->hw, E1000_ICR); 1679190872Sjfv if (!(icr & E1000_ICR_LSC)) 1680190872Sjfv goto spurious; 1681209238Sjfv igb_handle_link(adapter, 0); 1682190872Sjfv 1683190872Sjfvspurious: 1684190872Sjfv /* Rearm */ 1685190872Sjfv E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); 1686190872Sjfv E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask); 1687190872Sjfv return; 1688190872Sjfv} 1689190872Sjfv 1690190872Sjfv 1691176667Sjfv/********************************************************************* 1692176667Sjfv * 1693176667Sjfv * Media Ioctl callback 1694176667Sjfv * 1695176667Sjfv * This routine is called whenever the user queries the status of 1696176667Sjfv * the interface using ifconfig. 
 *
 **********************************************************************/
static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("igb_media_status: begin");

	IGB_CORE_LOCK(adapter);
	igb_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and stop */
	if (!adapter->link_active) {
		IGB_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case 100:
		/*
		** Support for 100Mb SFP - these are Fiber
		** but the media type appears as serdes
		*/
		if (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes)
			ifmr->ifm_active |= IFM_100_FX;
		else
			ifmr->ifm_active |= IFM_100_TX;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	}

	if (adapter->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	IGB_CORE_UNLOCK(adapter);
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediopt option with ifconfig.
1753176667Sjfv * 1754176667Sjfv **********************************************************************/ 1755176667Sjfvstatic int 1756176667Sjfvigb_media_change(struct ifnet *ifp) 1757176667Sjfv{ 1758176667Sjfv struct adapter *adapter = ifp->if_softc; 1759176667Sjfv struct ifmedia *ifm = &adapter->media; 1760176667Sjfv 1761176667Sjfv INIT_DEBUGOUT("igb_media_change: begin"); 1762176667Sjfv 1763176667Sjfv if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1764176667Sjfv return (EINVAL); 1765176667Sjfv 1766176667Sjfv IGB_CORE_LOCK(adapter); 1767176667Sjfv switch (IFM_SUBTYPE(ifm->ifm_media)) { 1768176667Sjfv case IFM_AUTO: 1769176667Sjfv adapter->hw.mac.autoneg = DO_AUTO_NEG; 1770176667Sjfv adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1771176667Sjfv break; 1772176667Sjfv case IFM_1000_LX: 1773176667Sjfv case IFM_1000_SX: 1774176667Sjfv case IFM_1000_T: 1775176667Sjfv adapter->hw.mac.autoneg = DO_AUTO_NEG; 1776176667Sjfv adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1777176667Sjfv break; 1778176667Sjfv case IFM_100_TX: 1779176667Sjfv adapter->hw.mac.autoneg = FALSE; 1780176667Sjfv adapter->hw.phy.autoneg_advertised = 0; 1781176667Sjfv if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1782176667Sjfv adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1783176667Sjfv else 1784176667Sjfv adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1785176667Sjfv break; 1786176667Sjfv case IFM_10_T: 1787176667Sjfv adapter->hw.mac.autoneg = FALSE; 1788176667Sjfv adapter->hw.phy.autoneg_advertised = 0; 1789176667Sjfv if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1790176667Sjfv adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1791176667Sjfv else 1792176667Sjfv adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1793176667Sjfv break; 1794176667Sjfv default: 1795176667Sjfv device_printf(adapter->dev, "Unsupported media type\n"); 1796176667Sjfv } 1797176667Sjfv 1798176667Sjfv igb_init_locked(adapter); 1799176667Sjfv IGB_CORE_UNLOCK(adapter); 
1800176667Sjfv 1801176667Sjfv return (0); 1802176667Sjfv} 1803176667Sjfv 1804176667Sjfv 1805176667Sjfv/********************************************************************* 1806176667Sjfv * 1807176667Sjfv * This routine maps the mbufs to Advanced TX descriptors. 1808176667Sjfv * 1809176667Sjfv **********************************************************************/ 1810176667Sjfvstatic int 1811176667Sjfvigb_xmit(struct tx_ring *txr, struct mbuf **m_headp) 1812176667Sjfv{ 1813176667Sjfv struct adapter *adapter = txr->adapter; 1814176667Sjfv bus_dma_segment_t segs[IGB_MAX_SCATTER]; 1815176667Sjfv bus_dmamap_t map; 1816190872Sjfv struct igb_tx_buffer *tx_buffer, *tx_buffer_mapped; 1817176667Sjfv union e1000_adv_tx_desc *txd = NULL; 1818235527Sjfv struct mbuf *m_head = *m_headp; 1819235527Sjfv struct ether_vlan_header *eh = NULL; 1820235527Sjfv struct ip *ip = NULL; 1821235527Sjfv struct tcphdr *th = NULL; 1822235527Sjfv u32 hdrlen, cmd_type_len, olinfo_status = 0; 1823235527Sjfv int ehdrlen, poff; 1824235527Sjfv int nsegs, i, first, last = 0; 1825235527Sjfv int error, do_tso, remap = 1; 1826176667Sjfv 1827176667Sjfv /* Set basic descriptor constants */ 1828235527Sjfv cmd_type_len = E1000_ADVTXD_DTYP_DATA; 1829176667Sjfv cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 1830176667Sjfv if (m_head->m_flags & M_VLANTAG) 1831176667Sjfv cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 1832176667Sjfv 1833235527Sjfvretry: 1834235527Sjfv m_head = *m_headp; 1835235527Sjfv do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0); 1836235527Sjfv hdrlen = ehdrlen = poff = 0; 1837235527Sjfv 1838176667Sjfv /* 1839235527Sjfv * Intel recommends entire IP/TCP header length reside in a single 1840235527Sjfv * buffer. If multiple descriptors are used to describe the IP and 1841235527Sjfv * TCP header, each descriptor should describe one or more 1842235527Sjfv * complete headers; descriptors referencing only parts of headers 1843235527Sjfv * are not supported. 
If all layer headers are not coalesced into 1844235527Sjfv * a single buffer, each buffer should not cross a 4KB boundary, 1845235527Sjfv * or be larger than the maximum read request size. 1846235527Sjfv * Controller also requires modifing IP/TCP header to make TSO work 1847235527Sjfv * so we firstly get a writable mbuf chain then coalesce ethernet/ 1848235527Sjfv * IP/TCP header into a single buffer to meet the requirement of 1849235527Sjfv * controller. This also simplifies IP/TCP/UDP checksum offloading 1850235527Sjfv * which also has similiar restrictions. 1851235527Sjfv */ 1852235527Sjfv if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { 1853235527Sjfv if (do_tso || (m_head->m_next != NULL && 1854235527Sjfv m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) { 1855235527Sjfv if (M_WRITABLE(*m_headp) == 0) { 1856248078Smarius m_head = m_dup(*m_headp, M_NOWAIT); 1857235527Sjfv m_freem(*m_headp); 1858235527Sjfv if (m_head == NULL) { 1859235527Sjfv *m_headp = NULL; 1860235527Sjfv return (ENOBUFS); 1861235527Sjfv } 1862235527Sjfv *m_headp = m_head; 1863235527Sjfv } 1864235527Sjfv } 1865235527Sjfv /* 1866235527Sjfv * Assume IPv4, we don't have TSO/checksum offload support 1867235527Sjfv * for IPv6 yet. 
1868235527Sjfv */ 1869235527Sjfv ehdrlen = sizeof(struct ether_header); 1870235527Sjfv m_head = m_pullup(m_head, ehdrlen); 1871235527Sjfv if (m_head == NULL) { 1872235527Sjfv *m_headp = NULL; 1873235527Sjfv return (ENOBUFS); 1874235527Sjfv } 1875235527Sjfv eh = mtod(m_head, struct ether_vlan_header *); 1876235527Sjfv if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1877235527Sjfv ehdrlen = sizeof(struct ether_vlan_header); 1878235527Sjfv m_head = m_pullup(m_head, ehdrlen); 1879235527Sjfv if (m_head == NULL) { 1880235527Sjfv *m_headp = NULL; 1881235527Sjfv return (ENOBUFS); 1882235527Sjfv } 1883235527Sjfv } 1884235527Sjfv m_head = m_pullup(m_head, ehdrlen + sizeof(struct ip)); 1885235527Sjfv if (m_head == NULL) { 1886235527Sjfv *m_headp = NULL; 1887235527Sjfv return (ENOBUFS); 1888235527Sjfv } 1889235527Sjfv ip = (struct ip *)(mtod(m_head, char *) + ehdrlen); 1890235527Sjfv poff = ehdrlen + (ip->ip_hl << 2); 1891235527Sjfv if (do_tso) { 1892235527Sjfv m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); 1893235527Sjfv if (m_head == NULL) { 1894235527Sjfv *m_headp = NULL; 1895235527Sjfv return (ENOBUFS); 1896235527Sjfv } 1897235527Sjfv /* 1898235527Sjfv * The pseudo TCP checksum does not include TCP payload 1899235527Sjfv * length so driver should recompute the checksum here 1900235527Sjfv * what hardware expect to see. This is adherence of 1901235527Sjfv * Microsoft's Large Send specification. 
1902235527Sjfv */ 1903235527Sjfv th = (struct tcphdr *)(mtod(m_head, char *) + poff); 1904235527Sjfv th->th_sum = in_pseudo(ip->ip_src.s_addr, 1905235527Sjfv ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1906235527Sjfv /* Keep track of the full header length */ 1907235527Sjfv hdrlen = poff + (th->th_off << 2); 1908235527Sjfv } else if (m_head->m_pkthdr.csum_flags & CSUM_TCP) { 1909235527Sjfv m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); 1910235527Sjfv if (m_head == NULL) { 1911235527Sjfv *m_headp = NULL; 1912235527Sjfv return (ENOBUFS); 1913235527Sjfv } 1914235527Sjfv th = (struct tcphdr *)(mtod(m_head, char *) + poff); 1915235527Sjfv m_head = m_pullup(m_head, poff + (th->th_off << 2)); 1916235527Sjfv if (m_head == NULL) { 1917235527Sjfv *m_headp = NULL; 1918235527Sjfv return (ENOBUFS); 1919235527Sjfv } 1920235527Sjfv ip = (struct ip *)(mtod(m_head, char *) + ehdrlen); 1921235527Sjfv th = (struct tcphdr *)(mtod(m_head, char *) + poff); 1922235527Sjfv } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) { 1923235527Sjfv m_head = m_pullup(m_head, poff + sizeof(struct udphdr)); 1924235527Sjfv if (m_head == NULL) { 1925235527Sjfv *m_headp = NULL; 1926235527Sjfv return (ENOBUFS); 1927235527Sjfv } 1928235527Sjfv ip = (struct ip *)(mtod(m_head, char *) + ehdrlen); 1929235527Sjfv } 1930235527Sjfv *m_headp = m_head; 1931235527Sjfv } 1932235527Sjfv 1933235527Sjfv /* 1934235527Sjfv * Map the packet for DMA 1935176667Sjfv * 1936176667Sjfv * Capture the first descriptor index, 1937176667Sjfv * this descriptor will have the index 1938176667Sjfv * of the EOP which is the only one that 1939176667Sjfv * now gets a DONE bit writeback. 
1940176667Sjfv */ 1941176667Sjfv first = txr->next_avail_desc; 1942176667Sjfv tx_buffer = &txr->tx_buffers[first]; 1943176667Sjfv tx_buffer_mapped = tx_buffer; 1944176667Sjfv map = tx_buffer->map; 1945176667Sjfv 1946176667Sjfv error = bus_dmamap_load_mbuf_sg(txr->txtag, map, 1947176667Sjfv *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); 1948176667Sjfv 1949235527Sjfv /* 1950235527Sjfv * There are two types of errors we can (try) to handle: 1951235527Sjfv * - EFBIG means the mbuf chain was too long and bus_dma ran 1952235527Sjfv * out of segments. Defragment the mbuf chain and try again. 1953235527Sjfv * - ENOMEM means bus_dma could not obtain enough bounce buffers 1954235527Sjfv * at this point in time. Defer sending and try again later. 1955235527Sjfv * All other errors, in particular EINVAL, are fatal and prevent the 1956235527Sjfv * mbuf chain from ever going through. Drop it and report error. 1957235527Sjfv */ 1958235527Sjfv if (error == EFBIG && remap) { 1959176667Sjfv struct mbuf *m; 1960176667Sjfv 1961248078Smarius m = m_defrag(*m_headp, M_NOWAIT); 1962176667Sjfv if (m == NULL) { 1963190872Sjfv adapter->mbuf_defrag_failed++; 1964176667Sjfv m_freem(*m_headp); 1965176667Sjfv *m_headp = NULL; 1966176667Sjfv return (ENOBUFS); 1967176667Sjfv } 1968176667Sjfv *m_headp = m; 1969176667Sjfv 1970235527Sjfv /* Try it again, but only once */ 1971235527Sjfv remap = 0; 1972235527Sjfv goto retry; 1973176667Sjfv } else if (error == ENOMEM) { 1974176667Sjfv adapter->no_tx_dma_setup++; 1975176667Sjfv return (error); 1976176667Sjfv } else if (error != 0) { 1977176667Sjfv adapter->no_tx_dma_setup++; 1978176667Sjfv m_freem(*m_headp); 1979176667Sjfv *m_headp = NULL; 1980176667Sjfv return (error); 1981176667Sjfv } 1982176667Sjfv 1983235527Sjfv /* 1984235527Sjfv ** Make sure we don't overrun the ring, 1985235527Sjfv ** we need nsegs descriptors and one for 1986235527Sjfv ** the context descriptor used for the 1987235527Sjfv ** offloads. 
1988235527Sjfv */ 1989235527Sjfv if ((nsegs + 1) > (txr->tx_avail - 2)) { 1990176667Sjfv txr->no_desc_avail++; 1991176667Sjfv bus_dmamap_unload(txr->txtag, map); 1992176667Sjfv return (ENOBUFS); 1993176667Sjfv } 1994176667Sjfv m_head = *m_headp; 1995176667Sjfv 1996235527Sjfv /* Do hardware assists: 1997235527Sjfv * Set up the context descriptor, used 1998235527Sjfv * when any hardware offload is done. 1999235527Sjfv * This includes CSUM, VLAN, and TSO. 2000235527Sjfv * It will use the first descriptor. 2001176667Sjfv */ 2002235527Sjfv 2003235527Sjfv if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 2004235527Sjfv if (igb_tso_setup(txr, m_head, ehdrlen, ip, th)) { 2005176667Sjfv cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 2006176667Sjfv olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 2007176667Sjfv olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 2008176667Sjfv } else 2009235527Sjfv return (ENXIO); 2010194865Sjfv } else if (igb_tx_ctx_setup(txr, m_head)) 2011235527Sjfv olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 2012194865Sjfv 2013176667Sjfv /* Calculate payload length */ 2014176667Sjfv olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) 2015176667Sjfv << E1000_ADVTXD_PAYLEN_SHIFT); 2016176667Sjfv 2017200243Sjfv /* 82575 needs the queue index added */ 2018200243Sjfv if (adapter->hw.mac.type == e1000_82575) 2019200243Sjfv olinfo_status |= txr->me << 4; 2020200243Sjfv 2021176667Sjfv /* Set up our transmit descriptors */ 2022176667Sjfv i = txr->next_avail_desc; 2023235527Sjfv for (int j = 0; j < nsegs; j++) { 2024176667Sjfv bus_size_t seg_len; 2025176667Sjfv bus_addr_t seg_addr; 2026176667Sjfv 2027176667Sjfv tx_buffer = &txr->tx_buffers[i]; 2028176667Sjfv txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; 2029176667Sjfv seg_addr = segs[j].ds_addr; 2030176667Sjfv seg_len = segs[j].ds_len; 2031176667Sjfv 2032176667Sjfv txd->read.buffer_addr = htole64(seg_addr); 2033203049Sjfv txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); 2034176667Sjfv txd->read.olinfo_status = 
htole32(olinfo_status); 2035176667Sjfv last = i; 2036176667Sjfv if (++i == adapter->num_tx_desc) 2037176667Sjfv i = 0; 2038176667Sjfv tx_buffer->m_head = NULL; 2039176667Sjfv tx_buffer->next_eop = -1; 2040176667Sjfv } 2041176667Sjfv 2042176667Sjfv txr->next_avail_desc = i; 2043176667Sjfv txr->tx_avail -= nsegs; 2044235527Sjfv tx_buffer->m_head = m_head; 2045176667Sjfv 2046235527Sjfv /* 2047235527Sjfv ** Here we swap the map so the last descriptor, 2048235527Sjfv ** which gets the completion interrupt has the 2049235527Sjfv ** real map, and the first descriptor gets the 2050235527Sjfv ** unused map from this descriptor. 2051235527Sjfv */ 2052176667Sjfv tx_buffer_mapped->map = tx_buffer->map; 2053176667Sjfv tx_buffer->map = map; 2054176667Sjfv bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); 2055176667Sjfv 2056176667Sjfv /* 2057176667Sjfv * Last Descriptor of Packet 2058176667Sjfv * needs End Of Packet (EOP) 2059176667Sjfv * and Report Status (RS) 2060176667Sjfv */ 2061176667Sjfv txd->read.cmd_type_len |= 2062203049Sjfv htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS); 2063176667Sjfv /* 2064176667Sjfv * Keep track in the first buffer which 2065176667Sjfv * descriptor will be written back 2066176667Sjfv */ 2067176667Sjfv tx_buffer = &txr->tx_buffers[first]; 2068176667Sjfv tx_buffer->next_eop = last; 2069235527Sjfv /* Update the watchdog time early and often */ 2070203051Sjfv txr->watchdog_time = ticks; 2071176667Sjfv 2072176667Sjfv /* 2073176667Sjfv * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 2074176667Sjfv * that this frame is available to transmit. 
2075176667Sjfv */ 2076176667Sjfv bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 2077176667Sjfv BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2078176667Sjfv E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); 2079176667Sjfv ++txr->tx_packets; 2080176667Sjfv 2081176667Sjfv return (0); 2082176667Sjfv} 2083176667Sjfvstatic void 2084176667Sjfvigb_set_promisc(struct adapter *adapter) 2085176667Sjfv{ 2086176667Sjfv struct ifnet *ifp = adapter->ifp; 2087209611Sjfv struct e1000_hw *hw = &adapter->hw; 2088209611Sjfv u32 reg; 2089176667Sjfv 2090218530Sjfv if (adapter->vf_ifp) { 2091209611Sjfv e1000_promisc_set_vf(hw, e1000_promisc_enabled); 2092209611Sjfv return; 2093209611Sjfv } 2094176667Sjfv 2095209611Sjfv reg = E1000_READ_REG(hw, E1000_RCTL); 2096176667Sjfv if (ifp->if_flags & IFF_PROMISC) { 2097209611Sjfv reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2098209611Sjfv E1000_WRITE_REG(hw, E1000_RCTL, reg); 2099176667Sjfv } else if (ifp->if_flags & IFF_ALLMULTI) { 2100209611Sjfv reg |= E1000_RCTL_MPE; 2101209611Sjfv reg &= ~E1000_RCTL_UPE; 2102209611Sjfv E1000_WRITE_REG(hw, E1000_RCTL, reg); 2103176667Sjfv } 2104176667Sjfv} 2105176667Sjfv 2106176667Sjfvstatic void 2107176667Sjfvigb_disable_promisc(struct adapter *adapter) 2108176667Sjfv{ 2109209611Sjfv struct e1000_hw *hw = &adapter->hw; 2110252899Sjfv struct ifnet *ifp = adapter->ifp; 2111209611Sjfv u32 reg; 2112252899Sjfv int mcnt = 0; 2113176667Sjfv 2114218530Sjfv if (adapter->vf_ifp) { 2115209611Sjfv e1000_promisc_set_vf(hw, e1000_promisc_disabled); 2116209611Sjfv return; 2117209611Sjfv } 2118209611Sjfv reg = E1000_READ_REG(hw, E1000_RCTL); 2119209611Sjfv reg &= (~E1000_RCTL_UPE); 2120252899Sjfv if (ifp->if_flags & IFF_ALLMULTI) 2121252899Sjfv mcnt = MAX_NUM_MULTICAST_ADDRESSES; 2122252899Sjfv else { 2123252899Sjfv struct ifmultiaddr *ifma; 2124252899Sjfv#if __FreeBSD_version < 800000 2125252899Sjfv IF_ADDR_LOCK(ifp); 2126252899Sjfv#else 2127252899Sjfv if_maddr_rlock(ifp); 2128252899Sjfv#endif 2129252899Sjfv 
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2130252899Sjfv if (ifma->ifma_addr->sa_family != AF_LINK) 2131252899Sjfv continue; 2132252899Sjfv if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2133252899Sjfv break; 2134252899Sjfv mcnt++; 2135252899Sjfv } 2136252899Sjfv#if __FreeBSD_version < 800000 2137252899Sjfv IF_ADDR_UNLOCK(ifp); 2138252899Sjfv#else 2139252899Sjfv if_maddr_runlock(ifp); 2140252899Sjfv#endif 2141252899Sjfv } 2142252899Sjfv /* Don't disable if in MAX groups */ 2143252899Sjfv if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 2144252899Sjfv reg &= (~E1000_RCTL_MPE); 2145209611Sjfv E1000_WRITE_REG(hw, E1000_RCTL, reg); 2146176667Sjfv} 2147176667Sjfv 2148176667Sjfv 2149176667Sjfv/********************************************************************* 2150176667Sjfv * Multicast Update 2151176667Sjfv * 2152176667Sjfv * This routine is called whenever multicast address list is updated. 2153176667Sjfv * 2154176667Sjfv **********************************************************************/ 2155176667Sjfv 2156176667Sjfvstatic void 2157176667Sjfvigb_set_multi(struct adapter *adapter) 2158176667Sjfv{ 2159176667Sjfv struct ifnet *ifp = adapter->ifp; 2160176667Sjfv struct ifmultiaddr *ifma; 2161182416Sjfv u32 reg_rctl = 0; 2162211913Syongari u8 *mta; 2163182416Sjfv 2164176667Sjfv int mcnt = 0; 2165176667Sjfv 2166176667Sjfv IOCTL_DEBUGOUT("igb_set_multi: begin"); 2167176667Sjfv 2168211913Syongari mta = adapter->mta; 2169211913Syongari bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN * 2170211913Syongari MAX_NUM_MULTICAST_ADDRESSES); 2171211913Syongari 2172200243Sjfv#if __FreeBSD_version < 800000 2173200243Sjfv IF_ADDR_LOCK(ifp); 2174200243Sjfv#else 2175195049Srwatson if_maddr_rlock(ifp); 2176200243Sjfv#endif 2177176667Sjfv TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2178176667Sjfv if (ifma->ifma_addr->sa_family != AF_LINK) 2179176667Sjfv continue; 2180176667Sjfv 2181176667Sjfv if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2182176667Sjfv break; 2183176667Sjfv 2184176667Sjfv 
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2185176667Sjfv &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); 2186176667Sjfv mcnt++; 2187176667Sjfv } 2188200243Sjfv#if __FreeBSD_version < 800000 2189200243Sjfv IF_ADDR_UNLOCK(ifp); 2190200243Sjfv#else 2191195049Srwatson if_maddr_runlock(ifp); 2192200243Sjfv#endif 2193203049Sjfv 2194176667Sjfv if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 2195176667Sjfv reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2196176667Sjfv reg_rctl |= E1000_RCTL_MPE; 2197176667Sjfv E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2198176667Sjfv } else 2199190872Sjfv e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); 2200176667Sjfv} 2201176667Sjfv 2202176667Sjfv 2203176667Sjfv/********************************************************************* 2204200243Sjfv * Timer routine: 2205200243Sjfv * This routine checks for link status, 2206200243Sjfv * updates statistics, and does the watchdog. 2207176667Sjfv * 2208176667Sjfv **********************************************************************/ 2209176667Sjfv 2210176667Sjfvstatic void 2211176667Sjfvigb_local_timer(void *arg) 2212176667Sjfv{ 2213200243Sjfv struct adapter *adapter = arg; 2214200243Sjfv device_t dev = adapter->dev; 2215235527Sjfv struct ifnet *ifp = adapter->ifp; 2216200243Sjfv struct tx_ring *txr = adapter->tx_rings; 2217235527Sjfv struct igb_queue *que = adapter->queues; 2218235527Sjfv int hung = 0, busy = 0; 2219176667Sjfv 2220200243Sjfv 2221176667Sjfv IGB_CORE_LOCK_ASSERT(adapter); 2222176667Sjfv 2223176667Sjfv igb_update_link_status(adapter); 2224176667Sjfv igb_update_stats_counters(adapter); 2225176667Sjfv 2226235527Sjfv /* 2227235527Sjfv ** Check the TX queues status 2228235527Sjfv ** - central locked handling of OACTIVE 2229235527Sjfv ** - watchdog only if all queues show hung 2230235527Sjfv */ 2231235527Sjfv for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { 2232235527Sjfv if ((txr->queue_status & IGB_QUEUE_HUNG) && 2233235527Sjfv (adapter->pause_frames == 
0)) 2234235527Sjfv ++hung; 2235235527Sjfv if (txr->queue_status & IGB_QUEUE_DEPLETED) 2236235527Sjfv ++busy; 2237235527Sjfv if ((txr->queue_status & IGB_QUEUE_IDLE) == 0) 2238235527Sjfv taskqueue_enqueue(que->tq, &que->que_task); 2239213234Sjfv } 2240235527Sjfv if (hung == adapter->num_queues) 2241235527Sjfv goto timeout; 2242235527Sjfv if (busy == adapter->num_queues) 2243235527Sjfv ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2244235527Sjfv else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) && 2245235527Sjfv (busy < adapter->num_queues)) 2246235527Sjfv ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2247213234Sjfv 2248235527Sjfv adapter->pause_frames = 0; 2249176667Sjfv callout_reset(&adapter->timer, hz, igb_local_timer, adapter); 2250218583Sjfv#ifndef DEVICE_POLLING 2251219753Sjfv /* Schedule all queue interrupts - deadlock protection */ 2252218583Sjfv E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask); 2253218583Sjfv#endif 2254200243Sjfv return; 2255176667Sjfv 2256200243Sjfvtimeout: 2257200243Sjfv device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 2258200243Sjfv device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 2259200243Sjfv E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), 2260200243Sjfv E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); 2261200243Sjfv device_printf(dev,"TX(%d) desc avail = %d," 2262200243Sjfv "Next TX to Clean = %d\n", 2263200243Sjfv txr->me, txr->tx_avail, txr->next_to_clean); 2264200243Sjfv adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2265200243Sjfv adapter->watchdog_events++; 2266200243Sjfv igb_init_locked(adapter); 2267176667Sjfv} 2268176667Sjfv 2269176667Sjfvstatic void 2270176667Sjfvigb_update_link_status(struct adapter *adapter) 2271176667Sjfv{ 2272248292Sjfv struct e1000_hw *hw = &adapter->hw; 2273248292Sjfv struct e1000_fc_info *fc = &hw->fc; 2274248292Sjfv struct ifnet *ifp = adapter->ifp; 2275248292Sjfv device_t dev = adapter->dev; 2276248292Sjfv struct tx_ring *txr = adapter->tx_rings; 2277248292Sjfv u32 
link_check, thstat, ctrl; 2278248292Sjfv char *flowctl = NULL; 2279176667Sjfv 2280219753Sjfv link_check = thstat = ctrl = 0; 2281219753Sjfv 2282177867Sjfv /* Get the cached link value or read for real */ 2283177867Sjfv switch (hw->phy.media_type) { 2284177867Sjfv case e1000_media_type_copper: 2285177867Sjfv if (hw->mac.get_link_status) { 2286177867Sjfv /* Do the work to read phy */ 2287177867Sjfv e1000_check_for_link(hw); 2288177867Sjfv link_check = !hw->mac.get_link_status; 2289177867Sjfv } else 2290177867Sjfv link_check = TRUE; 2291177867Sjfv break; 2292177867Sjfv case e1000_media_type_fiber: 2293177867Sjfv e1000_check_for_link(hw); 2294177867Sjfv link_check = (E1000_READ_REG(hw, E1000_STATUS) & 2295177867Sjfv E1000_STATUS_LU); 2296177867Sjfv break; 2297177867Sjfv case e1000_media_type_internal_serdes: 2298177867Sjfv e1000_check_for_link(hw); 2299177867Sjfv link_check = adapter->hw.mac.serdes_has_link; 2300177867Sjfv break; 2301209611Sjfv /* VF device is type_unknown */ 2302209611Sjfv case e1000_media_type_unknown: 2303209611Sjfv e1000_check_for_link(hw); 2304209611Sjfv link_check = !hw->mac.get_link_status; 2305209611Sjfv /* Fall thru */ 2306177867Sjfv default: 2307177867Sjfv break; 2308177867Sjfv } 2309177867Sjfv 2310219753Sjfv /* Check for thermal downshift or shutdown */ 2311219753Sjfv if (hw->mac.type == e1000_i350) { 2312219753Sjfv thstat = E1000_READ_REG(hw, E1000_THSTAT); 2313219753Sjfv ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); 2314219753Sjfv } 2315219753Sjfv 2316248292Sjfv /* Get the flow control for display */ 2317248292Sjfv switch (fc->current_mode) { 2318248292Sjfv case e1000_fc_rx_pause: 2319248292Sjfv flowctl = "RX"; 2320248292Sjfv break; 2321248292Sjfv case e1000_fc_tx_pause: 2322248292Sjfv flowctl = "TX"; 2323248292Sjfv break; 2324248292Sjfv case e1000_fc_full: 2325248292Sjfv flowctl = "Full"; 2326248292Sjfv break; 2327248292Sjfv case e1000_fc_none: 2328248292Sjfv default: 2329248292Sjfv flowctl = "None"; 2330248292Sjfv break; 2331248292Sjfv } 
2332248292Sjfv 2333177867Sjfv /* Now we check if a transition has happened */ 2334177867Sjfv if (link_check && (adapter->link_active == 0)) { 2335177867Sjfv e1000_get_speed_and_duplex(&adapter->hw, 2336177867Sjfv &adapter->link_speed, &adapter->link_duplex); 2337177867Sjfv if (bootverbose) 2338248292Sjfv device_printf(dev, "Link is up %d Mbps %s," 2339248292Sjfv " Flow Control: %s\n", 2340177867Sjfv adapter->link_speed, 2341177867Sjfv ((adapter->link_duplex == FULL_DUPLEX) ? 2342248292Sjfv "Full Duplex" : "Half Duplex"), flowctl); 2343177867Sjfv adapter->link_active = 1; 2344177867Sjfv ifp->if_baudrate = adapter->link_speed * 1000000; 2345219753Sjfv if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 2346219753Sjfv (thstat & E1000_THSTAT_LINK_THROTTLE)) 2347219753Sjfv device_printf(dev, "Link: thermal downshift\n"); 2348205869Sjfv /* This can sleep */ 2349177867Sjfv if_link_state_change(ifp, LINK_STATE_UP); 2350177867Sjfv } else if (!link_check && (adapter->link_active == 1)) { 2351177867Sjfv ifp->if_baudrate = adapter->link_speed = 0; 2352177867Sjfv adapter->link_duplex = 0; 2353177867Sjfv if (bootverbose) 2354177867Sjfv device_printf(dev, "Link is Down\n"); 2355219753Sjfv if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 2356219753Sjfv (thstat & E1000_THSTAT_PWR_DOWN)) 2357219753Sjfv device_printf(dev, "Link: thermal shutdown\n"); 2358177867Sjfv adapter->link_active = 0; 2359205869Sjfv /* This can sleep */ 2360177867Sjfv if_link_state_change(ifp, LINK_STATE_DOWN); 2361235527Sjfv /* Reset queue state */ 2362194865Sjfv for (int i = 0; i < adapter->num_queues; i++, txr++) 2363215781Sjfv txr->queue_status = IGB_QUEUE_IDLE; 2364176667Sjfv } 2365176667Sjfv} 2366176667Sjfv 2367176667Sjfv/********************************************************************* 2368176667Sjfv * 2369176667Sjfv * This routine disables all traffic on the adapter by issuing a 2370176667Sjfv * global reset on the MAC and deallocates TX/RX buffers. 
 *
 **********************************************************************/

static void
igb_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;

	/* Caller must hold the core lock */
	IGB_CORE_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("igb_stop: begin");

	igb_disable_intr(adapter);

	/* Stop the periodic timer before touching queue state */
	callout_stop(&adapter->timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Disarm watchdog timer. */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IGB_TX_LOCK(txr);
		txr->queue_status = IGB_QUEUE_IDLE;
		IGB_TX_UNLOCK(txr);
	}

	/* Global reset quiesces all traffic; also clear wakeup control */
	e1000_reset_hw(&adapter->hw);
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

	e1000_led_off(&adapter->hw);
	e1000_cleanup_led(&adapter->hw);
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
igb_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	pci_enable_busmaster(dev);
	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Set MAC type early for PCI setup */
	e1000_set_mac_type(&adapter->hw);

	/* Are we a VF device?
 */
	if ((adapter->hw.mac.type == e1000_vfadapt) ||
	    (adapter->hw.mac.type == e1000_vfadapt_i350))
		adapter->vf_ifp = 1;
	else
		adapter->vf_ifp = 0;
}

/*
 * Map BAR 0 (device registers) and pick the interrupt scheme.
 * Returns 0 on success or ENXIO if the memory BAR cannot be mapped.
 */
static int
igb_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		rid;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->pci_mem == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	adapter->num_queues = 1; /* Defaults for Legacy or MSI */

	/* This will setup either MSI/X or MSI */
	adapter->msix = igb_setup_msix(adapter);
	adapter->hw.back = &adapter->osdep;

	return (0);
}

/*********************************************************************
 *
 *  Setup the Legacy or MSI Interrupt handler
 *
 **********************************************************************/
static int
igb_allocate_legacy(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct igb_queue	*que = adapter->queues;
	int			error, rid = 0;

	/* Turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* MSI RID is 1 */
	if (adapter->msix == 1)
		rid = 1;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifndef IGB_LEGACY_TX
	TASK_INIT(&que->txr->txq_task, 0, igb_deferred_mq_start, que->txr);
#endif

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	TASK_INIT(&que->que_task, 0, igb_handle_que, que);
	/* Make tasklet for deferred link handling */
	TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter);
	que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL,
	    adapter, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/* Unwind the taskqueue on failure */
		taskqueue_free(que->tq);
		que->tq = NULL;
		return (error);
	}

	return (0);
}


/*********************************************************************
 *
 *  Setup the MSIX Queue Interrupt handlers:
 *
 **********************************************************************/
static int
igb_allocate_msix(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct igb_queue	*que = adapter->queues;
	int			error, rid, vector = 0;

	/* Be sure to start with all interrupts disabled */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(&adapter->hw);

	/* One MSIX vector per queue; the link vector follows after */
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector +1;	/* MSIX RIDs are 1-based */
		que->res = bus_alloc_resource_any(dev,
		    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,
			    "Unable to allocate bus resource: "
			    "MSIX Queue Interrupt\n");
			return (ENXIO);
		}
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    igb_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register Queue handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		/* 82575 uses a per-queue TX bitmap; later MACs one bit/vector */
		if (adapter->hw.mac.type == e1000_82575)
			que->eims = E1000_EICR_TX_QUEUE0 << i;
		else
			que->eims = 1 << vector;
		/*
		** Bind the msix vector, and thus the
		** rings to the corresponding cpu.
		*/
		if (adapter->num_queues > 1) {
			if (igb_last_bind_cpu < 0)
				igb_last_bind_cpu = CPU_FIRST();
			bus_bind_intr(dev, que->res, igb_last_bind_cpu);
			device_printf(dev,
			    "Bound queue %d to cpu %d\n",
			    i,igb_last_bind_cpu);
			igb_last_bind_cpu = CPU_NEXT(igb_last_bind_cpu);
		}
#ifndef IGB_LEGACY_TX
		TASK_INIT(&que->txr->txq_task, 0, igb_deferred_mq_start,
		    que->txr);
#endif
		/* Make tasklet for deferred handling */
		TASK_INIT(&que->que_task, 0, igb_handle_que, que);
		que->tq = taskqueue_create("igb_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* And Link */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev,
		    "Unable to allocate bus resource: "
		    "MSIX Link Interrupt\n");
		return (ENXIO);
	}
	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    igb_msix_link, adapter, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register Link handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
#endif
	adapter->linkvec = vector;

	return (0);
}


static void
igb_configure_queues(struct adapter *adapter)
{
	struct	e1000_hw	*hw = &adapter->hw;
	struct	igb_queue	*que;
	u32		tmp, ivar = 0, newitr = 0;

	/* First turn on RSS capability */
	if (adapter->hw.mac.type != e1000_82575)
		E1000_WRITE_REG(hw, E1000_GPIE,
		    E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
		    E1000_GPIE_PBA | E1000_GPIE_NSICR);

	/*
	 * Turn on MSIX: program the IVAR table that maps RX/TX queue
	 * causes (and the link cause) to MSIX vectors.  The IVAR layout
	 * differs per MAC generation, hence the per-type cases.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	case e1000_vfadapt:
	case e1000_vfadapt_i350:
		/* RX entries: two queues per 32-bit IVAR register */
		for (int i = 0; i < adapter->num_queues; i++) {
			u32 index = i >> 1;
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
			que = &adapter->queues[i];
			if (i & 1) {
				ivar &= 0xFF00FFFF;
				ivar |= (que->msix | E1000_IVAR_VALID) << 16;
			} else {
				ivar &= 0xFFFFFF00;
				ivar |= que->msix | E1000_IVAR_VALID;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		/* TX entries */
		for (int i = 0; i < adapter->num_queues; i++) {
			u32 index = i >> 1;
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
			que = &adapter->queues[i];
			if (i & 1) {
				ivar &= 0x00FFFFFF;
				ivar |= (que->msix | E1000_IVAR_VALID) << 24;
			} else {
				ivar &= 0xFFFF00FF;
				ivar |= (que->msix | E1000_IVAR_VALID) << 8;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
			adapter->que_mask |= que->eims;
		}

		/* And for the link interrupt */
		ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
		adapter->link_mask = 1 << adapter->linkvec;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
		break;
	case e1000_82576:
		/* RX entries */
		for (int i = 0; i < adapter->num_queues; i++) {
			u32 index = i & 0x7; /* Each IVAR has two entries */
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
			que = &adapter->queues[i];
			if (i < 8) {
				ivar &= 0xFFFFFF00;
				ivar |= que->msix | E1000_IVAR_VALID;
			} else {
				ivar &= 0xFF00FFFF;
				ivar |= (que->msix | E1000_IVAR_VALID) << 16;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
			adapter->que_mask |= que->eims;
		}
		/* TX entries */
		for (int i = 0; i < adapter->num_queues; i++) {
			u32 index = i & 0x7; /* Each IVAR has two entries */
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
			que = &adapter->queues[i];
			if (i < 8) {
				ivar &= 0xFFFF00FF;
				ivar |= (que->msix | E1000_IVAR_VALID) << 8;
			} else {
				ivar &= 0x00FFFFFF;
				ivar |= (que->msix | E1000_IVAR_VALID) << 24;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
			adapter->que_mask |= que->eims;
		}

		/* And for the link interrupt */
		ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
		adapter->link_mask = 1 << adapter->linkvec;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
		break;

	case e1000_82575:
		/* enable MSI-X support*/
		tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);

		/* Queues: 82575 uses MSIXBM bitmaps instead of IVARs */
		for (int i = 0; i < adapter->num_queues; i++) {
			que = &adapter->queues[i];
			tmp = E1000_EICR_RX_QUEUE0 << i;
			tmp |= E1000_EICR_TX_QUEUE0 << i;
			que->eims = tmp;
			E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
			    i, que->eims);
			adapter->que_mask |= que->eims;
		}

		/* Link */
		E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
		    E1000_EIMS_OTHER);
		adapter->link_mask |= E1000_EIMS_OTHER;
		/* NOTE(review): falls into default, which only breaks */
	default:
		break;
	}

	/* Set the starting interrupt rate */
	if (igb_max_interrupt_rate > 0)
		newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC;

	if (hw->mac.type == e1000_82575)
		newitr |= newitr << 16;
	else
		newitr |= E1000_EITR_CNT_IGNR;

	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr);
	}

	return;
}


static void
igb_free_pci_resources(struct adapter *adapter)
{
	struct		igb_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid;

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in
 entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	 * First release all the interrupt resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev,
			    SYS_RES_IRQ, rid, que->res);
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->linkvec) /* we are doing MSIX */
		rid = adapter->linkvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	que = adapter->queues;
	if (adapter->tag != NULL) {
		/* Link task runs on the first queue's taskqueue */
		taskqueue_drain(que->tq, &adapter->link_task);
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

	/* Drain and destroy the per-queue taskqueues */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq != NULL) {
#ifndef IGB_LEGACY_TX
			taskqueue_drain(que->tq, &que->txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}
mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

}

/*
 * Setup Either MSI/X or MSI
 */
static int
igb_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, queues, msgs, maxqueues;

	/* tuneable override */
	if (igb_enable_msix == 0)
		goto msi;

	/* First try MSI/X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	rid = PCIR_BAR(IGB_MSIX_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem
 == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;

	/* Manual override */
	if (igb_num_queues != 0)
		queues = igb_num_queues;

	/* Sanity check based on HW */
	switch (adapter->hw.mac.type) {
	case e1000_82575:
		maxqueues = 4;
		break;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		maxqueues = 8;
		break;
	case e1000_i210:
		maxqueues = 4;
		break;
	case e1000_i211:
		maxqueues = 2;
		break;
	default: /* VF interfaces */
		maxqueues = 1;
		break;
	}
	if (queues > maxqueues)
		queues = maxqueues;

	/* reflect correct sysctl value */
	igb_num_queues = queues;

	/*
	** One vector (RX/TX pair) per queue
	** plus an additional for Link interrupt
	*/
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		device_printf(adapter->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors configured, but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		return (msgs);
	}
	/*
	** If MSIX alloc failed or provided us with
	** less than needed, free and fall through to MSI
	*/
	pci_release_msi(dev);

msi:
	/* Release the MSIX table BAR before falling back */
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	msgs = 1;
	if (pci_alloc_msi(dev, &msgs) == 0) {
		device_printf(adapter->dev," Using an MSI interrupt\n");
		return (msgs);
	}
	device_printf(adapter->dev," Using a Legacy interrupt\n");
	return (0);
}

/*********************************************************************
 *
 *  Set up an fresh starting state
 *
 **********************************************************************/
static void
igb_reset(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_fc_info *fc = &hw->fc;
	struct ifnet	*ifp = adapter->ifp;
	u32		pba = 0;
	u16		hwm;

	INIT_DEBUGOUT("igb_reset: begin");

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;
	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;
	case e1000_i210:
	case e1000_i211:
		pba = E1000_PBA_34K;
		/* NOTE(review): falls into default, which is empty */
	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
		u32 tx_space, min_tx, min_rx;
		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;
		/* TX needs room for two max-size frames plus descriptors */
		min_tx = (adapter->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;	/* PBA units are KB */
		min_rx = adapter->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx &&
		    ((min_tx - tx_space) < pba)) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	INIT_DEBUGOUT1("igb_init: pba=%dK",pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * adapter->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}

	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
	/* sysctl-selected mode takes precedence over the default */
	if (adapter->fc)
		fc->requested_mode = adapter->fc;
	else
		fc->requested_mode = e1000_fc_default;

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		device_printf(dev, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing (i350 and newer, except i211) */
	if ((hw->mac.type > e1000_82580) &&
	    (hw->mac.type != e1000_i211)) {
		u32 dmac;
		u32 reg = ~E1000_DMACR_DMAC_EN;

		if (adapter->dmac == 0) { /* Disabling it */
			E1000_WRITE_REG(hw, E1000_DMACR, reg);
			goto reset_out;
		}

		/* Set starting thresholds */
		E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		hwm = 64 * pba - adapter->max_frame_size / 16;
		if (hwm < 64 * (pba - 6))
			hwm = 64 * (pba - 6);
		reg = E1000_READ_REG(hw, E1000_FCRTC);
		reg &= ~E1000_FCRTC_RTH_COAL_MASK;
		reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
		    & E1000_FCRTC_RTH_COAL_MASK);
		E1000_WRITE_REG(hw, E1000_FCRTC, reg);


		dmac = pba - adapter->max_frame_size / 512;
		if (dmac < pba - 10)
			dmac = pba - 10;
		reg = E1000_READ_REG(hw, E1000_DMACR);
		reg &= ~E1000_DMACR_DMACTHR_MASK;
		reg = ((dmac << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK);
		/* transition to L0x or L1 if available..*/
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
		/* timer = value in adapter->dmac in 32usec intervals */
		reg |= (adapter->dmac >> 5);
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x80000004;
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * adapter->max_frame_size)) >> 6);

		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		reg &= ~E1000_PCIEMISC_LX_DECISION;
		E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
		device_printf(dev, "DMA Coalescing enabled\n");

	} else if (hw->mac.type == e1000_82580) {
		/* 82580: DMA coalescing off, PCIe decides low-power state */
		u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_DMACR, 0);
		E1000_WRITE_REG(hw,
 E1000_PCIEMISC,
		    reg & ~E1000_PCIEMISC_LX_DECISION);
	}

reset_out:
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
	return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
igb_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("igb_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init =  igb_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = igb_ioctl;
#ifndef IGB_LEGACY_TX
	/* Multiqueue transmit path */
	ifp->if_transmit = igb_mq_start;
	ifp->if_qflush = igb_qflush;
#else
	/* Legacy single-queue if_start path */
	ifp->if_start = igb_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ether_ifattach(ifp, adapter->hw.mac.addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_TSO4;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Tell the upper layer(s) we
	 * support full VLAN capability.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING
			  |  IFCAP_VLAN_HWTSO
			  |  IFCAP_VLAN_MTU;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the igb driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    igb_media_change, igb_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	return (0);
}


/*
 * Manage DMA'able memory.
3210176667Sjfv */ 3211176667Sjfvstatic void 3212176667Sjfvigb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3213176667Sjfv{ 3214176667Sjfv if (error) 3215176667Sjfv return; 3216176667Sjfv *(bus_addr_t *) arg = segs[0].ds_addr; 3217176667Sjfv} 3218176667Sjfv 3219176667Sjfvstatic int 3220176667Sjfvigb_dma_malloc(struct adapter *adapter, bus_size_t size, 3221176667Sjfv struct igb_dma_alloc *dma, int mapflags) 3222176667Sjfv{ 3223176667Sjfv int error; 3224176667Sjfv 3225176667Sjfv error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ 3226203049Sjfv IGB_DBA_ALIGN, 0, /* alignment, bounds */ 3227176667Sjfv BUS_SPACE_MAXADDR, /* lowaddr */ 3228176667Sjfv BUS_SPACE_MAXADDR, /* highaddr */ 3229176667Sjfv NULL, NULL, /* filter, filterarg */ 3230176667Sjfv size, /* maxsize */ 3231176667Sjfv 1, /* nsegments */ 3232176667Sjfv size, /* maxsegsize */ 3233176667Sjfv 0, /* flags */ 3234176667Sjfv NULL, /* lockfunc */ 3235176667Sjfv NULL, /* lockarg */ 3236176667Sjfv &dma->dma_tag); 3237176667Sjfv if (error) { 3238176667Sjfv device_printf(adapter->dev, 3239176667Sjfv "%s: bus_dma_tag_create failed: %d\n", 3240176667Sjfv __func__, error); 3241176667Sjfv goto fail_0; 3242176667Sjfv } 3243176667Sjfv 3244176667Sjfv error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 3245220375Sjfv BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); 3246176667Sjfv if (error) { 3247176667Sjfv device_printf(adapter->dev, 3248176667Sjfv "%s: bus_dmamem_alloc(%ju) failed: %d\n", 3249176667Sjfv __func__, (uintmax_t)size, error); 3250176667Sjfv goto fail_2; 3251176667Sjfv } 3252176667Sjfv 3253176667Sjfv dma->dma_paddr = 0; 3254176667Sjfv error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 3255176667Sjfv size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); 3256176667Sjfv if (error || dma->dma_paddr == 0) { 3257176667Sjfv device_printf(adapter->dev, 3258176667Sjfv "%s: bus_dmamap_load failed: %d\n", 3259176667Sjfv __func__, error); 
3260176667Sjfv goto fail_3; 3261176667Sjfv } 3262176667Sjfv 3263176667Sjfv return (0); 3264176667Sjfv 3265176667Sjfvfail_3: 3266176667Sjfv bus_dmamap_unload(dma->dma_tag, dma->dma_map); 3267176667Sjfvfail_2: 3268176667Sjfv bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 3269176667Sjfv bus_dma_tag_destroy(dma->dma_tag); 3270176667Sjfvfail_0: 3271176667Sjfv dma->dma_map = NULL; 3272176667Sjfv dma->dma_tag = NULL; 3273176667Sjfv 3274176667Sjfv return (error); 3275176667Sjfv} 3276176667Sjfv 3277176667Sjfvstatic void 3278176667Sjfvigb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma) 3279176667Sjfv{ 3280176667Sjfv if (dma->dma_tag == NULL) 3281176667Sjfv return; 3282176667Sjfv if (dma->dma_map != NULL) { 3283176667Sjfv bus_dmamap_sync(dma->dma_tag, dma->dma_map, 3284176667Sjfv BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3285176667Sjfv bus_dmamap_unload(dma->dma_tag, dma->dma_map); 3286176667Sjfv bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 3287176667Sjfv dma->dma_map = NULL; 3288176667Sjfv } 3289176667Sjfv bus_dma_tag_destroy(dma->dma_tag); 3290176667Sjfv dma->dma_tag = NULL; 3291176667Sjfv} 3292176667Sjfv 3293176667Sjfv 3294176667Sjfv/********************************************************************* 3295176667Sjfv * 3296176667Sjfv * Allocate memory for the transmit and receive rings, and then 3297176667Sjfv * the descriptors associated with each, called only once at attach. 
3298176667Sjfv * 3299176667Sjfv **********************************************************************/ 3300176667Sjfvstatic int 3301176667Sjfvigb_allocate_queues(struct adapter *adapter) 3302176667Sjfv{ 3303176667Sjfv device_t dev = adapter->dev; 3304203049Sjfv struct igb_queue *que = NULL; 3305203049Sjfv struct tx_ring *txr = NULL; 3306203049Sjfv struct rx_ring *rxr = NULL; 3307176667Sjfv int rsize, tsize, error = E1000_SUCCESS; 3308176667Sjfv int txconf = 0, rxconf = 0; 3309176667Sjfv 3310203049Sjfv /* First allocate the top level queue structs */ 3311203049Sjfv if (!(adapter->queues = 3312203049Sjfv (struct igb_queue *) malloc(sizeof(struct igb_queue) * 3313203049Sjfv adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 3314203049Sjfv device_printf(dev, "Unable to allocate queue memory\n"); 3315203049Sjfv error = ENOMEM; 3316203049Sjfv goto fail; 3317203049Sjfv } 3318203049Sjfv 3319203049Sjfv /* Next allocate the TX ring struct memory */ 3320176667Sjfv if (!(adapter->tx_rings = 3321176667Sjfv (struct tx_ring *) malloc(sizeof(struct tx_ring) * 3322194865Sjfv adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 3323176667Sjfv device_printf(dev, "Unable to allocate TX ring memory\n"); 3324176667Sjfv error = ENOMEM; 3325203049Sjfv goto tx_fail; 3326176667Sjfv } 3327176667Sjfv 3328203049Sjfv /* Now allocate the RX */ 3329176667Sjfv if (!(adapter->rx_rings = 3330176667Sjfv (struct rx_ring *) malloc(sizeof(struct rx_ring) * 3331194865Sjfv adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 3332176667Sjfv device_printf(dev, "Unable to allocate RX ring memory\n"); 3333176667Sjfv error = ENOMEM; 3334176667Sjfv goto rx_fail; 3335176667Sjfv } 3336176667Sjfv 3337176667Sjfv tsize = roundup2(adapter->num_tx_desc * 3338176667Sjfv sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN); 3339176667Sjfv /* 3340176667Sjfv * Now set up the TX queues, txconf is needed to handle the 3341176667Sjfv * possibility that things fail midcourse and we need to 3342176667Sjfv * undo memory 
gracefully 3343176667Sjfv */ 3344194865Sjfv for (int i = 0; i < adapter->num_queues; i++, txconf++) { 3345176667Sjfv /* Set up some basics */ 3346176667Sjfv txr = &adapter->tx_rings[i]; 3347176667Sjfv txr->adapter = adapter; 3348176667Sjfv txr->me = i; 3349176667Sjfv 3350176667Sjfv /* Initialize the TX lock */ 3351182416Sjfv snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", 3352176667Sjfv device_get_nameunit(dev), txr->me); 3353182416Sjfv mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); 3354176667Sjfv 3355176667Sjfv if (igb_dma_malloc(adapter, tsize, 3356176667Sjfv &txr->txdma, BUS_DMA_NOWAIT)) { 3357176667Sjfv device_printf(dev, 3358176667Sjfv "Unable to allocate TX Descriptor memory\n"); 3359176667Sjfv error = ENOMEM; 3360176667Sjfv goto err_tx_desc; 3361176667Sjfv } 3362176667Sjfv txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr; 3363176667Sjfv bzero((void *)txr->tx_base, tsize); 3364176667Sjfv 3365176667Sjfv /* Now allocate transmit buffers for the ring */ 3366176667Sjfv if (igb_allocate_transmit_buffers(txr)) { 3367176667Sjfv device_printf(dev, 3368176667Sjfv "Critical Failure setting up transmit buffers\n"); 3369176667Sjfv error = ENOMEM; 3370176667Sjfv goto err_tx_desc; 3371176667Sjfv } 3372252899Sjfv#ifndef IGB_LEGACY_TX 3373194865Sjfv /* Allocate a buf ring */ 3374248292Sjfv txr->br = buf_ring_alloc(igb_buf_ring_size, M_DEVBUF, 3375194865Sjfv M_WAITOK, &txr->tx_mtx); 3376194865Sjfv#endif 3377176667Sjfv } 3378176667Sjfv 3379176667Sjfv /* 3380176667Sjfv * Next the RX queues... 
3381176667Sjfv */ 3382176667Sjfv rsize = roundup2(adapter->num_rx_desc * 3383176667Sjfv sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); 3384194865Sjfv for (int i = 0; i < adapter->num_queues; i++, rxconf++) { 3385176667Sjfv rxr = &adapter->rx_rings[i]; 3386176667Sjfv rxr->adapter = adapter; 3387176667Sjfv rxr->me = i; 3388176667Sjfv 3389176667Sjfv /* Initialize the RX lock */ 3390182416Sjfv snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", 3391176667Sjfv device_get_nameunit(dev), txr->me); 3392182416Sjfv mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); 3393176667Sjfv 3394176667Sjfv if (igb_dma_malloc(adapter, rsize, 3395176667Sjfv &rxr->rxdma, BUS_DMA_NOWAIT)) { 3396176667Sjfv device_printf(dev, 3397176667Sjfv "Unable to allocate RxDescriptor memory\n"); 3398176667Sjfv error = ENOMEM; 3399176667Sjfv goto err_rx_desc; 3400176667Sjfv } 3401176667Sjfv rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr; 3402176667Sjfv bzero((void *)rxr->rx_base, rsize); 3403176667Sjfv 3404176667Sjfv /* Allocate receive buffers for the ring*/ 3405176667Sjfv if (igb_allocate_receive_buffers(rxr)) { 3406176667Sjfv device_printf(dev, 3407176667Sjfv "Critical Failure setting up receive buffers\n"); 3408176667Sjfv error = ENOMEM; 3409176667Sjfv goto err_rx_desc; 3410176667Sjfv } 3411176667Sjfv } 3412176667Sjfv 3413203049Sjfv /* 3414203049Sjfv ** Finally set up the queue holding structs 3415203049Sjfv */ 3416203049Sjfv for (int i = 0; i < adapter->num_queues; i++) { 3417203049Sjfv que = &adapter->queues[i]; 3418203049Sjfv que->adapter = adapter; 3419203049Sjfv que->txr = &adapter->tx_rings[i]; 3420203049Sjfv que->rxr = &adapter->rx_rings[i]; 3421203049Sjfv } 3422203049Sjfv 3423176667Sjfv return (0); 3424176667Sjfv 3425176667Sjfverr_rx_desc: 3426176667Sjfv for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) 3427176667Sjfv igb_dma_free(adapter, &rxr->rxdma); 3428176667Sjfverr_tx_desc: 3429176667Sjfv for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) 
3430176667Sjfv igb_dma_free(adapter, &txr->txdma); 3431176667Sjfv free(adapter->rx_rings, M_DEVBUF); 3432176667Sjfvrx_fail: 3433252899Sjfv#ifndef IGB_LEGACY_TX 3434203049Sjfv buf_ring_free(txr->br, M_DEVBUF); 3435208103Sjfv#endif 3436176667Sjfv free(adapter->tx_rings, M_DEVBUF); 3437203049Sjfvtx_fail: 3438203049Sjfv free(adapter->queues, M_DEVBUF); 3439176667Sjfvfail: 3440176667Sjfv return (error); 3441176667Sjfv} 3442176667Sjfv 3443176667Sjfv/********************************************************************* 3444176667Sjfv * 3445176667Sjfv * Allocate memory for tx_buffer structures. The tx_buffer stores all 3446176667Sjfv * the information needed to transmit a packet on the wire. This is 3447176667Sjfv * called only once at attach, setup is done every reset. 3448176667Sjfv * 3449176667Sjfv **********************************************************************/ 3450176667Sjfvstatic int 3451176667Sjfvigb_allocate_transmit_buffers(struct tx_ring *txr) 3452176667Sjfv{ 3453176667Sjfv struct adapter *adapter = txr->adapter; 3454176667Sjfv device_t dev = adapter->dev; 3455190872Sjfv struct igb_tx_buffer *txbuf; 3456176667Sjfv int error, i; 3457176667Sjfv 3458176667Sjfv /* 3459176667Sjfv * Setup DMA descriptor areas. 
3460176667Sjfv */ 3461203049Sjfv if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 3462199192Sjfv 1, 0, /* alignment, bounds */ 3463176667Sjfv BUS_SPACE_MAXADDR, /* lowaddr */ 3464176667Sjfv BUS_SPACE_MAXADDR, /* highaddr */ 3465176667Sjfv NULL, NULL, /* filter, filterarg */ 3466176667Sjfv IGB_TSO_SIZE, /* maxsize */ 3467176667Sjfv IGB_MAX_SCATTER, /* nsegments */ 3468203049Sjfv PAGE_SIZE, /* maxsegsize */ 3469176667Sjfv 0, /* flags */ 3470176667Sjfv NULL, /* lockfunc */ 3471176667Sjfv NULL, /* lockfuncarg */ 3472176667Sjfv &txr->txtag))) { 3473176667Sjfv device_printf(dev,"Unable to allocate TX DMA tag\n"); 3474176667Sjfv goto fail; 3475176667Sjfv } 3476176667Sjfv 3477176667Sjfv if (!(txr->tx_buffers = 3478190872Sjfv (struct igb_tx_buffer *) malloc(sizeof(struct igb_tx_buffer) * 3479176667Sjfv adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { 3480176667Sjfv device_printf(dev, "Unable to allocate tx_buffer memory\n"); 3481176667Sjfv error = ENOMEM; 3482176667Sjfv goto fail; 3483176667Sjfv } 3484176667Sjfv 3485176667Sjfv /* Create the descriptor buffer dma maps */ 3486176667Sjfv txbuf = txr->tx_buffers; 3487176667Sjfv for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { 3488176667Sjfv error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); 3489176667Sjfv if (error != 0) { 3490176667Sjfv device_printf(dev, "Unable to create TX DMA map\n"); 3491176667Sjfv goto fail; 3492176667Sjfv } 3493176667Sjfv } 3494176667Sjfv 3495176667Sjfv return 0; 3496176667Sjfvfail: 3497176667Sjfv /* We free all, it handles case where we are in the middle */ 3498176667Sjfv igb_free_transmit_structures(adapter); 3499176667Sjfv return (error); 3500176667Sjfv} 3501176667Sjfv 3502176667Sjfv/********************************************************************* 3503176667Sjfv * 3504176667Sjfv * Initialize a transmit ring. 
3505176667Sjfv * 3506176667Sjfv **********************************************************************/ 3507176667Sjfvstatic void 3508176667Sjfvigb_setup_transmit_ring(struct tx_ring *txr) 3509176667Sjfv{ 3510176667Sjfv struct adapter *adapter = txr->adapter; 3511190872Sjfv struct igb_tx_buffer *txbuf; 3512176667Sjfv int i; 3513235527Sjfv#ifdef DEV_NETMAP 3514235527Sjfv struct netmap_adapter *na = NA(adapter->ifp); 3515235527Sjfv struct netmap_slot *slot; 3516235527Sjfv#endif /* DEV_NETMAP */ 3517176667Sjfv 3518194865Sjfv /* Clear the old descriptor contents */ 3519203049Sjfv IGB_TX_LOCK(txr); 3520235527Sjfv#ifdef DEV_NETMAP 3521235527Sjfv slot = netmap_reset(na, NR_TX, txr->me, 0); 3522235527Sjfv#endif /* DEV_NETMAP */ 3523176667Sjfv bzero((void *)txr->tx_base, 3524176667Sjfv (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc); 3525176667Sjfv /* Reset indices */ 3526176667Sjfv txr->next_avail_desc = 0; 3527176667Sjfv txr->next_to_clean = 0; 3528176667Sjfv 3529176667Sjfv /* Free any existing tx buffers. 
*/ 3530176667Sjfv txbuf = txr->tx_buffers; 3531176667Sjfv for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { 3532176667Sjfv if (txbuf->m_head != NULL) { 3533176667Sjfv bus_dmamap_sync(txr->txtag, txbuf->map, 3534176667Sjfv BUS_DMASYNC_POSTWRITE); 3535176667Sjfv bus_dmamap_unload(txr->txtag, txbuf->map); 3536176667Sjfv m_freem(txbuf->m_head); 3537176667Sjfv txbuf->m_head = NULL; 3538176667Sjfv } 3539235527Sjfv#ifdef DEV_NETMAP 3540235527Sjfv if (slot) { 3541235527Sjfv int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); 3542235527Sjfv /* no need to set the address */ 3543235527Sjfv netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si)); 3544235527Sjfv } 3545235527Sjfv#endif /* DEV_NETMAP */ 3546176667Sjfv /* clear the watch index */ 3547176667Sjfv txbuf->next_eop = -1; 3548176667Sjfv } 3549176667Sjfv 3550176667Sjfv /* Set number of descriptors available */ 3551176667Sjfv txr->tx_avail = adapter->num_tx_desc; 3552176667Sjfv 3553176667Sjfv bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 3554176667Sjfv BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3555203049Sjfv IGB_TX_UNLOCK(txr); 3556176667Sjfv} 3557176667Sjfv 3558176667Sjfv/********************************************************************* 3559176667Sjfv * 3560176667Sjfv * Initialize all transmit rings. 3561176667Sjfv * 3562176667Sjfv **********************************************************************/ 3563176667Sjfvstatic void 3564176667Sjfvigb_setup_transmit_structures(struct adapter *adapter) 3565176667Sjfv{ 3566176667Sjfv struct tx_ring *txr = adapter->tx_rings; 3567176667Sjfv 3568194865Sjfv for (int i = 0; i < adapter->num_queues; i++, txr++) 3569176667Sjfv igb_setup_transmit_ring(txr); 3570176667Sjfv 3571176667Sjfv return; 3572176667Sjfv} 3573176667Sjfv 3574176667Sjfv/********************************************************************* 3575176667Sjfv * 3576176667Sjfv * Enable transmit unit. 
3577176667Sjfv * 3578176667Sjfv **********************************************************************/ 3579176667Sjfvstatic void 3580176667Sjfvigb_initialize_transmit_units(struct adapter *adapter) 3581176667Sjfv{ 3582176667Sjfv struct tx_ring *txr = adapter->tx_rings; 3583203049Sjfv struct e1000_hw *hw = &adapter->hw; 3584182416Sjfv u32 tctl, txdctl; 3585176667Sjfv 3586209611Sjfv INIT_DEBUGOUT("igb_initialize_transmit_units: begin"); 3587209611Sjfv tctl = txdctl = 0; 3588176667Sjfv 3589200243Sjfv /* Setup the Tx Descriptor Rings */ 3590194865Sjfv for (int i = 0; i < adapter->num_queues; i++, txr++) { 3591176667Sjfv u64 bus_addr = txr->txdma.dma_paddr; 3592176667Sjfv 3593203049Sjfv E1000_WRITE_REG(hw, E1000_TDLEN(i), 3594176667Sjfv adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); 3595203049Sjfv E1000_WRITE_REG(hw, E1000_TDBAH(i), 3596176667Sjfv (uint32_t)(bus_addr >> 32)); 3597203049Sjfv E1000_WRITE_REG(hw, E1000_TDBAL(i), 3598176667Sjfv (uint32_t)bus_addr); 3599176667Sjfv 3600176667Sjfv /* Setup the HW Tx Head and Tail descriptor pointers */ 3601203049Sjfv E1000_WRITE_REG(hw, E1000_TDT(i), 0); 3602203049Sjfv E1000_WRITE_REG(hw, E1000_TDH(i), 0); 3603176667Sjfv 3604176667Sjfv HW_DEBUGOUT2("Base = %x, Length = %x\n", 3605203049Sjfv E1000_READ_REG(hw, E1000_TDBAL(i)), 3606203049Sjfv E1000_READ_REG(hw, E1000_TDLEN(i))); 3607176667Sjfv 3608215781Sjfv txr->queue_status = IGB_QUEUE_IDLE; 3609176667Sjfv 3610203049Sjfv txdctl |= IGB_TX_PTHRESH; 3611203049Sjfv txdctl |= IGB_TX_HTHRESH << 8; 3612203049Sjfv txdctl |= IGB_TX_WTHRESH << 16; 3613176667Sjfv txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 3614203049Sjfv E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 3615176667Sjfv } 3616176667Sjfv 3617218530Sjfv if (adapter->vf_ifp) 3618209611Sjfv return; 3619209611Sjfv 3620215781Sjfv e1000_config_collision_dist(hw); 3621215781Sjfv 3622176667Sjfv /* Program the Transmit Control Register */ 3623203049Sjfv tctl = E1000_READ_REG(hw, E1000_TCTL); 3624176667Sjfv tctl &= ~E1000_TCTL_CT; 
3625176667Sjfv tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 3626176667Sjfv (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 3627176667Sjfv 3628176667Sjfv /* This write will effectively turn on the transmit unit. */ 3629203049Sjfv E1000_WRITE_REG(hw, E1000_TCTL, tctl); 3630176667Sjfv} 3631176667Sjfv 3632176667Sjfv/********************************************************************* 3633176667Sjfv * 3634176667Sjfv * Free all transmit rings. 3635176667Sjfv * 3636176667Sjfv **********************************************************************/ 3637176667Sjfvstatic void 3638176667Sjfvigb_free_transmit_structures(struct adapter *adapter) 3639176667Sjfv{ 3640176667Sjfv struct tx_ring *txr = adapter->tx_rings; 3641176667Sjfv 3642194865Sjfv for (int i = 0; i < adapter->num_queues; i++, txr++) { 3643176667Sjfv IGB_TX_LOCK(txr); 3644176667Sjfv igb_free_transmit_buffers(txr); 3645176667Sjfv igb_dma_free(adapter, &txr->txdma); 3646176667Sjfv IGB_TX_UNLOCK(txr); 3647176667Sjfv IGB_TX_LOCK_DESTROY(txr); 3648176667Sjfv } 3649176667Sjfv free(adapter->tx_rings, M_DEVBUF); 3650176667Sjfv} 3651176667Sjfv 3652176667Sjfv/********************************************************************* 3653176667Sjfv * 3654176667Sjfv * Free transmit ring related data structures. 
3655176667Sjfv * 3656176667Sjfv **********************************************************************/ 3657176667Sjfvstatic void 3658176667Sjfvigb_free_transmit_buffers(struct tx_ring *txr) 3659176667Sjfv{ 3660176667Sjfv struct adapter *adapter = txr->adapter; 3661190872Sjfv struct igb_tx_buffer *tx_buffer; 3662176667Sjfv int i; 3663176667Sjfv 3664176667Sjfv INIT_DEBUGOUT("free_transmit_ring: begin"); 3665176667Sjfv 3666176667Sjfv if (txr->tx_buffers == NULL) 3667176667Sjfv return; 3668176667Sjfv 3669176667Sjfv tx_buffer = txr->tx_buffers; 3670176667Sjfv for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { 3671176667Sjfv if (tx_buffer->m_head != NULL) { 3672176667Sjfv bus_dmamap_sync(txr->txtag, tx_buffer->map, 3673176667Sjfv BUS_DMASYNC_POSTWRITE); 3674176667Sjfv bus_dmamap_unload(txr->txtag, 3675176667Sjfv tx_buffer->map); 3676176667Sjfv m_freem(tx_buffer->m_head); 3677176667Sjfv tx_buffer->m_head = NULL; 3678176667Sjfv if (tx_buffer->map != NULL) { 3679176667Sjfv bus_dmamap_destroy(txr->txtag, 3680176667Sjfv tx_buffer->map); 3681176667Sjfv tx_buffer->map = NULL; 3682176667Sjfv } 3683176667Sjfv } else if (tx_buffer->map != NULL) { 3684176667Sjfv bus_dmamap_unload(txr->txtag, 3685176667Sjfv tx_buffer->map); 3686176667Sjfv bus_dmamap_destroy(txr->txtag, 3687176667Sjfv tx_buffer->map); 3688176667Sjfv tx_buffer->map = NULL; 3689176667Sjfv } 3690176667Sjfv } 3691252899Sjfv#ifndef IGB_LEGACY_TX 3692195851Sjfv if (txr->br != NULL) 3693195851Sjfv buf_ring_free(txr->br, M_DEVBUF); 3694194865Sjfv#endif 3695176667Sjfv if (txr->tx_buffers != NULL) { 3696176667Sjfv free(txr->tx_buffers, M_DEVBUF); 3697176667Sjfv txr->tx_buffers = NULL; 3698176667Sjfv } 3699176667Sjfv if (txr->txtag != NULL) { 3700176667Sjfv bus_dma_tag_destroy(txr->txtag); 3701176667Sjfv txr->txtag = NULL; 3702176667Sjfv } 3703176667Sjfv return; 3704176667Sjfv} 3705176667Sjfv 3706176667Sjfv/********************************************************************** 3707176667Sjfv * 3708203049Sjfv * Setup 
work for hardware segmentation offload (TSO) 3709176667Sjfv * 3710176667Sjfv **********************************************************************/ 3711229145Smdfstatic bool 3712235527Sjfvigb_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ehdrlen, 3713235527Sjfv struct ip *ip, struct tcphdr *th) 3714176667Sjfv{ 3715176667Sjfv struct adapter *adapter = txr->adapter; 3716176667Sjfv struct e1000_adv_tx_context_desc *TXD; 3717190872Sjfv struct igb_tx_buffer *tx_buffer; 3718176667Sjfv u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 3719176667Sjfv u32 mss_l4len_idx = 0; 3720176667Sjfv u16 vtag = 0; 3721235527Sjfv int ctxd, ip_hlen, tcp_hlen; 3722176667Sjfv 3723176667Sjfv ctxd = txr->next_avail_desc; 3724176667Sjfv tx_buffer = &txr->tx_buffers[ctxd]; 3725176667Sjfv TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; 3726176667Sjfv 3727176667Sjfv ip->ip_sum = 0; 3728176667Sjfv ip_hlen = ip->ip_hl << 2; 3729176667Sjfv tcp_hlen = th->th_off << 2; 3730176667Sjfv 3731176667Sjfv /* VLAN MACLEN IPLEN */ 3732176667Sjfv if (mp->m_flags & M_VLANTAG) { 3733176667Sjfv vtag = htole16(mp->m_pkthdr.ether_vtag); 3734176667Sjfv vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); 3735176667Sjfv } 3736176667Sjfv 3737176667Sjfv vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT); 3738176667Sjfv vlan_macip_lens |= ip_hlen; 3739176667Sjfv TXD->vlan_macip_lens |= htole32(vlan_macip_lens); 3740176667Sjfv 3741176667Sjfv /* ADV DTYPE TUCMD */ 3742176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 3743176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 3744176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 3745176667Sjfv TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); 3746176667Sjfv 3747176667Sjfv /* MSS L4LEN IDX */ 3748176667Sjfv mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); 3749176667Sjfv mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT); 3750200243Sjfv /* 82575 needs the queue index added */ 
3751200243Sjfv if (adapter->hw.mac.type == e1000_82575) 3752200243Sjfv mss_l4len_idx |= txr->me << 4; 3753176667Sjfv TXD->mss_l4len_idx = htole32(mss_l4len_idx); 3754176667Sjfv 3755176667Sjfv TXD->seqnum_seed = htole32(0); 3756176667Sjfv tx_buffer->m_head = NULL; 3757176667Sjfv tx_buffer->next_eop = -1; 3758176667Sjfv 3759176667Sjfv if (++ctxd == adapter->num_tx_desc) 3760176667Sjfv ctxd = 0; 3761176667Sjfv 3762176667Sjfv txr->tx_avail--; 3763176667Sjfv txr->next_avail_desc = ctxd; 3764176667Sjfv return TRUE; 3765176667Sjfv} 3766176667Sjfv 3767194865Sjfv 3768176667Sjfv/********************************************************************* 3769176667Sjfv * 3770176667Sjfv * Context Descriptor setup for VLAN or CSUM 3771176667Sjfv * 3772176667Sjfv **********************************************************************/ 3773176667Sjfv 3774194865Sjfvstatic bool 3775176667Sjfvigb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) 3776176667Sjfv{ 3777176667Sjfv struct adapter *adapter = txr->adapter; 3778176667Sjfv struct e1000_adv_tx_context_desc *TXD; 3779190872Sjfv struct igb_tx_buffer *tx_buffer; 3780200243Sjfv u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 3781176667Sjfv struct ether_vlan_header *eh; 3782176667Sjfv struct ip *ip = NULL; 3783176667Sjfv struct ip6_hdr *ip6; 3784194865Sjfv int ehdrlen, ctxd, ip_hlen = 0; 3785194865Sjfv u16 etype, vtag = 0; 3786176667Sjfv u8 ipproto = 0; 3787194865Sjfv bool offload = TRUE; 3788176667Sjfv 3789194865Sjfv if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) 3790194865Sjfv offload = FALSE; 3791194865Sjfv 3792200243Sjfv vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 3793194865Sjfv ctxd = txr->next_avail_desc; 3794176667Sjfv tx_buffer = &txr->tx_buffers[ctxd]; 3795176667Sjfv TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; 3796176667Sjfv 3797176667Sjfv /* 3798176667Sjfv ** In advanced descriptors the vlan tag must 3799194865Sjfv ** be placed into the context descriptor, thus 3800194865Sjfv ** we need 
to be here just for that setup. 3801176667Sjfv */ 3802176667Sjfv if (mp->m_flags & M_VLANTAG) { 3803176667Sjfv vtag = htole16(mp->m_pkthdr.ether_vtag); 3804176667Sjfv vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); 3805194865Sjfv } else if (offload == FALSE) 3806194865Sjfv return FALSE; 3807194865Sjfv 3808176667Sjfv /* 3809176667Sjfv * Determine where frame payload starts. 3810176667Sjfv * Jump over vlan headers if already present, 3811176667Sjfv * helpful for QinQ too. 3812176667Sjfv */ 3813176667Sjfv eh = mtod(mp, struct ether_vlan_header *); 3814176667Sjfv if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3815176667Sjfv etype = ntohs(eh->evl_proto); 3816176667Sjfv ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3817176667Sjfv } else { 3818176667Sjfv etype = ntohs(eh->evl_encap_proto); 3819176667Sjfv ehdrlen = ETHER_HDR_LEN; 3820176667Sjfv } 3821176667Sjfv 3822176667Sjfv /* Set the ether header length */ 3823176667Sjfv vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 3824176667Sjfv 3825176667Sjfv switch (etype) { 3826176667Sjfv case ETHERTYPE_IP: 3827176667Sjfv ip = (struct ip *)(mp->m_data + ehdrlen); 3828176667Sjfv ip_hlen = ip->ip_hl << 2; 3829194865Sjfv if (mp->m_len < ehdrlen + ip_hlen) { 3830194865Sjfv offload = FALSE; 3831194865Sjfv break; 3832194865Sjfv } 3833176667Sjfv ipproto = ip->ip_p; 3834176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 3835176667Sjfv break; 3836176667Sjfv case ETHERTYPE_IPV6: 3837176667Sjfv ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3838176667Sjfv ip_hlen = sizeof(struct ip6_hdr); 3839176667Sjfv ipproto = ip6->ip6_nxt; 3840176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; 3841176667Sjfv break; 3842176667Sjfv default: 3843194865Sjfv offload = FALSE; 3844194865Sjfv break; 3845176667Sjfv } 3846176667Sjfv 3847176667Sjfv vlan_macip_lens |= ip_hlen; 3848176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 3849176667Sjfv 3850176667Sjfv switch (ipproto) { 3851176667Sjfv case 
IPPROTO_TCP: 3852194865Sjfv if (mp->m_pkthdr.csum_flags & CSUM_TCP) 3853176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 3854176667Sjfv break; 3855176667Sjfv case IPPROTO_UDP: 3856194865Sjfv if (mp->m_pkthdr.csum_flags & CSUM_UDP) 3857176667Sjfv type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 3858176667Sjfv break; 3859190872Sjfv#if __FreeBSD_version >= 800000 3860190872Sjfv case IPPROTO_SCTP: 3861194865Sjfv if (mp->m_pkthdr.csum_flags & CSUM_SCTP) 3862190872Sjfv type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP; 3863190872Sjfv break; 3864190872Sjfv#endif 3865176667Sjfv default: 3866194865Sjfv offload = FALSE; 3867194865Sjfv break; 3868176667Sjfv } 3869176667Sjfv 3870200243Sjfv /* 82575 needs the queue index added */ 3871200243Sjfv if (adapter->hw.mac.type == e1000_82575) 3872200243Sjfv mss_l4len_idx = txr->me << 4; 3873200243Sjfv 3874176667Sjfv /* Now copy bits into descriptor */ 3875176667Sjfv TXD->vlan_macip_lens |= htole32(vlan_macip_lens); 3876176667Sjfv TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); 3877176667Sjfv TXD->seqnum_seed = htole32(0); 3878200243Sjfv TXD->mss_l4len_idx = htole32(mss_l4len_idx); 3879176667Sjfv 3880176667Sjfv tx_buffer->m_head = NULL; 3881176667Sjfv tx_buffer->next_eop = -1; 3882176667Sjfv 3883176667Sjfv /* We've consumed the first desc, adjust counters */ 3884176667Sjfv if (++ctxd == adapter->num_tx_desc) 3885176667Sjfv ctxd = 0; 3886176667Sjfv txr->next_avail_desc = ctxd; 3887176667Sjfv --txr->tx_avail; 3888176667Sjfv 3889194865Sjfv return (offload); 3890176667Sjfv} 3891176667Sjfv 3892176667Sjfv 3893176667Sjfv/********************************************************************** 3894176667Sjfv * 3895176667Sjfv * Examine each tx_buffer in the used queue. If the hardware is done 3896176667Sjfv * processing the packet then free associated resources. The 3897176667Sjfv * tx_buffer is put back on the free queue. 3898176667Sjfv * 3899176667Sjfv * TRUE return means there's work in the ring to clean, FALSE its empty. 
 **********************************************************************/
static bool
igb_txeof(struct tx_ring *txr)
{
	struct adapter		*adapter = txr->adapter;
	int			first, last, done, processed;
	struct igb_tx_buffer	*tx_buffer;
	struct e1000_tx_desc	*tx_desc, *eop_desc;
	struct ifnet		*ifp = adapter->ifp;

	IGB_TX_LOCK_ASSERT(txr);

#ifdef DEV_NETMAP
	/* In netmap mode the TX interrupt is handled by netmap itself */
	if (netmap_tx_irq(ifp, txr->me))
		return (FALSE);
#endif /* DEV_NETMAP */
	/* Nothing outstanding on this ring */
	if (txr->tx_avail == adapter->num_tx_desc) {
		txr->queue_status = IGB_QUEUE_IDLE;
		return FALSE;
	}

	processed = 0;
	first = txr->next_to_clean;
	tx_desc = &txr->tx_base[first];
	tx_buffer = &txr->tx_buffers[first];
	last = tx_buffer->next_eop;
	eop_desc = &txr->tx_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* DD in the EOP descriptor means the hardware is done with the packet */
	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;
			++processed;

			if (tx_buffer->m_head) {
				txr->bytes +=
				    tx_buffer->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc = &txr->tx_base[first];
		}
		++txr->packets;
		++ifp->if_opackets;
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &txr->tx_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc)
				last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	** Watchdog calculation, we know there's
	** work outstanding or the first return
	** would have been taken, so none processed
	** for too long indicates a hang.
	*/
	if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG))
		txr->queue_status |= IGB_QUEUE_HUNG;
	/*
	 * If we have a minimum free,
	 * clear depleted state bit
	 */
	if (txr->tx_avail >= IGB_QUEUE_THRESHOLD)
		txr->queue_status &= ~IGB_QUEUE_DEPLETED;

	/* All clean, turn off the watchdog */
	if (txr->tx_avail == adapter->num_tx_desc) {
		txr->queue_status = IGB_QUEUE_IDLE;
		return (FALSE);
	}

	return (TRUE);
}

/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
4019176667Sjfv * 4020176667Sjfv **********************************************************************/ 4021205869Sjfvstatic void 4022205869Sjfvigb_refresh_mbufs(struct rx_ring *rxr, int limit) 4023176667Sjfv{ 4024176667Sjfv struct adapter *adapter = rxr->adapter; 4025205869Sjfv bus_dma_segment_t hseg[1]; 4026205869Sjfv bus_dma_segment_t pseg[1]; 4027200243Sjfv struct igb_rx_buf *rxbuf; 4028190872Sjfv struct mbuf *mh, *mp; 4029219753Sjfv int i, j, nsegs, error; 4030219753Sjfv bool refreshed = FALSE; 4031176667Sjfv 4032219753Sjfv i = j = rxr->next_to_refresh; 4033219753Sjfv /* 4034219753Sjfv ** Get one descriptor beyond 4035219753Sjfv ** our work mark to control 4036219753Sjfv ** the loop. 4037219753Sjfv */ 4038219753Sjfv if (++j == adapter->num_rx_desc) 4039219753Sjfv j = 0; 4040219753Sjfv 4041219753Sjfv while (j != limit) { 4042205869Sjfv rxbuf = &rxr->rx_buffers[i]; 4043215781Sjfv /* No hdr mbuf used with header split off */ 4044215781Sjfv if (rxr->hdr_split == FALSE) 4045215781Sjfv goto no_split; 4046215781Sjfv if (rxbuf->m_head == NULL) { 4047248078Smarius mh = m_gethdr(M_NOWAIT, MT_DATA); 4048219753Sjfv if (mh == NULL) 4049205869Sjfv goto update; 4050215781Sjfv } else 4051215781Sjfv mh = rxbuf->m_head; 4052215781Sjfv 4053215781Sjfv mh->m_pkthdr.len = mh->m_len = MHLEN; 4054215781Sjfv mh->m_len = MHLEN; 4055215781Sjfv mh->m_flags |= M_PKTHDR; 4056215781Sjfv /* Get the memory mapping */ 4057215781Sjfv error = bus_dmamap_load_mbuf_sg(rxr->htag, 4058215781Sjfv rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); 4059215781Sjfv if (error != 0) { 4060215781Sjfv printf("Refresh mbufs: hdr dmamap load" 4061215781Sjfv " failure - %d\n", error); 4062215781Sjfv m_free(mh); 4063215781Sjfv rxbuf->m_head = NULL; 4064215781Sjfv goto update; 4065203049Sjfv } 4066215781Sjfv rxbuf->m_head = mh; 4067215781Sjfv bus_dmamap_sync(rxr->htag, rxbuf->hmap, 4068215781Sjfv BUS_DMASYNC_PREREAD); 4069215781Sjfv rxr->rx_base[i].read.hdr_addr = 4070215781Sjfv htole64(hseg[0].ds_addr); 
4071215781Sjfvno_split: 4072205869Sjfv if (rxbuf->m_pack == NULL) { 4073248078Smarius mp = m_getjcl(M_NOWAIT, MT_DATA, 4074205869Sjfv M_PKTHDR, adapter->rx_mbuf_sz); 4075219753Sjfv if (mp == NULL) 4076205869Sjfv goto update; 4077215781Sjfv } else 4078215781Sjfv mp = rxbuf->m_pack; 4079215781Sjfv 4080215781Sjfv mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; 4081215781Sjfv /* Get the memory mapping */ 4082215781Sjfv error = bus_dmamap_load_mbuf_sg(rxr->ptag, 4083215781Sjfv rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); 4084215781Sjfv if (error != 0) { 4085215781Sjfv printf("Refresh mbufs: payload dmamap load" 4086215781Sjfv " failure - %d\n", error); 4087215781Sjfv m_free(mp); 4088215781Sjfv rxbuf->m_pack = NULL; 4089215781Sjfv goto update; 4090203049Sjfv } 4091215781Sjfv rxbuf->m_pack = mp; 4092215781Sjfv bus_dmamap_sync(rxr->ptag, rxbuf->pmap, 4093215781Sjfv BUS_DMASYNC_PREREAD); 4094215781Sjfv rxr->rx_base[i].read.pkt_addr = 4095215781Sjfv htole64(pseg[0].ds_addr); 4096219753Sjfv refreshed = TRUE; /* I feel wefreshed :) */ 4097190872Sjfv 4098219753Sjfv i = j; /* our next is precalculated */ 4099205869Sjfv rxr->next_to_refresh = i; 4100219753Sjfv if (++j == adapter->num_rx_desc) 4101219753Sjfv j = 0; 4102203049Sjfv } 4103205869Sjfvupdate: 4104219753Sjfv if (refreshed) /* update tail */ 4105205869Sjfv E1000_WRITE_REG(&adapter->hw, 4106219753Sjfv E1000_RDT(rxr->me), rxr->next_to_refresh); 4107205869Sjfv return; 4108205869Sjfv} 4109176667Sjfv 4110200243Sjfv 4111176667Sjfv/********************************************************************* 4112176667Sjfv * 4113176667Sjfv * Allocate memory for rx_buffer structures. Since we use one 4114176667Sjfv * rx_buffer per received packet, the maximum number of rx_buffer's 4115176667Sjfv * that we'll need is equal to the number of receive descriptors 4116176667Sjfv * that we've allocated. 
 *
 **********************************************************************/
static int
igb_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	device_t		dev = adapter->dev;
	struct igb_rx_buf	*rxbuf;
	int			i, bsize, error;

	bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct igb_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Header tag: single segment, at most one mbuf (MSIZE) */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MSIZE,		/* maxsize */
				   1,			/* nsegments */
				   MSIZE,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	/* Payload tag: single segment up to a 9k jumbo cluster */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MJUM9BYTES,		/* maxsize */
				   1,			/* nsegments */
				   MJUM9BYTES,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &rxr->ptag))) {
		device_printf(dev, "Unable to create RX payload DMA tag\n");
		goto fail;
	}

	/* One header map and one payload map per descriptor slot */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev,
			    "Unable to create RX head DMA maps\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev,
			    "Unable to create RX packet DMA maps\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	igb_free_receive_structures(adapter);
	return (error);
}


/*
 * Unload and free the header/payload mbufs of every slot in the ring,
 * leaving the DMA maps and tags themselves intact for reuse.
 */
static void
igb_free_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct igb_rx_buf	*rxbuf;


	for (int i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
			rxbuf->m_head->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_head);
		}
		if (rxbuf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			rxbuf->m_pack->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_pack);
		}
		rxbuf->m_head = NULL;
		rxbuf->m_pack = NULL;
	}
}


/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
igb_setup_receive_ring(struct rx_ring *rxr)
{
	struct	adapter		*adapter;
	struct  ifnet		*ifp;
	device_t		dev;
	struct igb_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(rxr->adapter->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */

	adapter = rxr->adapter;
	dev = adapter->dev;
	ifp = adapter->ifp;

	/* Clear the ring contents */
	IGB_RX_LOCK(rxr);
#ifdef DEV_NETMAP
	slot = netmap_reset(na, NR_RX, rxr->me, 0);
#endif /* DEV_NETMAP */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/*
	** Free current RX buffer structures and their mbufs
	*/
	igb_free_receive_ring(rxr);

	/* Configure for header split?
 */
	if (igb_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the ring mbufs */
	for (int j = 0; j < adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
#ifdef DEV_NETMAP
		if (slot) {
			/* slot sj is mapped to the i-th NIC-ring entry */
			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
			uint64_t paddr;
			void *addr;

			addr = PNMB(slot + sj, &paddr);
			netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
			/* Update descriptor */
			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
			continue;
		}
#endif /* DEV_NETMAP */
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = adapter->num_rx_desc - 1;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface, we
	** also only do head split when LRO
	** is enabled, since so often they
	** are undesireable in similar setups.
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		error = tcp_lro_init(lro);
		if (error) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IGB_RX_UNLOCK(rxr);
	return (0);

fail:
	igb_free_receive_ring(rxr);
	IGB_RX_UNLOCK(rxr);
	return (error);
}


/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
igb_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, rxr++)
		if (igb_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'i' is the endpoint.
	 */
	for (int j = 0; j < i; ++j) {
		rxr = &adapter->rx_rings[j];
		IGB_RX_LOCK(rxr);
		igb_free_receive_ring(rxr);
		IGB_RX_UNLOCK(rxr);
	}

	return (ENOBUFS);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
static void
igb_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ifnet	*ifp = adapter->ifp;
	struct e1000_hw *hw = &adapter->hw;
	u32		rctl, rxcsum, psize, srrctl = 0;

	INIT_DEBUGOUT("igb_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	** Set up for header split
	*/
	if (igb_header_split) {
		/* Use a standard mbuf for the header */
		srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	/*
	** Set up for jumbo frames
	*/
	if (ifp->if_mtu > ETHERMTU) {
		rctl |= E1000_RCTL_LPE;
		/* Pick the RX buffer size that matches the cluster size */
		if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		}
		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 bus_addr = rxr->rxdma.dma_paddr;
		u32 rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/*
	** Setup for RX MultiQueue
	*/
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	if (adapter->num_queues >1) {
		u32 random[10], mrqc, shift = 0;
		union igb_reta {
			u32 dword;
			u8  bytes[4];
		} reta;

		/* Random key for the RSS hash */
		arc4rand(&random, sizeof(random), 0);
		if (adapter->hw.mac.type == e1000_82575)
			shift = 6;
		/* Warning FM follows */
		for (int i = 0; i < 128; i++) {
			reta.bytes[i & 3] =
			    (i % adapter->num_queues) << shift;
			if ((i & 3) == 3)
				E1000_WRITE_REG(hw,
				    E1000_RETA(i >> 2), reta.dword);
		}
		/* Now fill in hash table */
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
		for (int i = 0; i < 10; i++)
			E1000_WRITE_REG_ARRAY(hw,
			    E1000_RSSRK(0), i, random[i]);

		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		    E1000_MRQC_RSS_FIELD_IPV6_TCP);
		mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
		    E1000_MRQC_RSS_FIELD_IPV6_UDP);
		mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		    E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);

		/*
		** NOTE: Receive Full-Packet Checksum Offload
		** is mutually exclusive with Multiqueue. However
		** this is not the same as TCP/IP checksums which
		** still work.
		*/
		rxcsum |= E1000_RXCSUM_PCSD;
#if __FreeBSD_version >= 800000
		/* For SCTP Offload */
		if ((hw->mac.type == e1000_82576)
		    && (ifp->if_capenable & IFCAP_RXCSUM))
			rxcsum |= E1000_RXCSUM_CRCOFL;
#endif
	} else {
		/* Non RSS setup */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			rxcsum |= E1000_RXCSUM_IPPCSE;
#if __FreeBSD_version >= 800000
			if (adapter->hw.mac.type == e1000_82576)
				rxcsum |= E1000_RXCSUM_CRCOFL;
#endif
		} else
			rxcsum &= ~E1000_RXCSUM_TUOFL;
	}
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Strip CRC bytes. */
	rctl |= E1000_RCTL_SECRC;
	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers
	 *   - needs to be after enable
	 */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
#ifdef DEV_NETMAP
		/*
		 * an init() while a netmap client is active must
		 * preserve the rx buffers passed to userspace.
		 * In this driver it means we adjust RDT to
		 * something different from next_to_refresh
		 * (which is not used in netmap mode).
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = rxr->next_to_refresh - nm_kr_rxspace(kring);

			if (t >= adapter->num_rx_desc)
				t -= adapter->num_rx_desc;
			else if (t < 0)
				t += adapter->num_rx_desc;
			E1000_WRITE_REG(hw, E1000_RDT(i), t);
		} else
#endif /* DEV_NETMAP */
		E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh);
	}
	return;
}

/*********************************************************************
 *
 *  Free receive rings.
 *
 **********************************************************************/
static void
igb_free_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl	*lro = &rxr->lro;
		igb_free_receive_buffers(rxr);
		tcp_lro_free(lro);
		igb_dma_free(adapter, &rxr->rxdma);
	}

	free(adapter->rx_rings, M_DEVBUF);
}

/*********************************************************************
 *
 *  Free receive ring data structures.
 *
 **********************************************************************/
static void
igb_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter	*adapter = rxr->adapter;
	struct igb_rx_buf	*rxbuf;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		/* NOTE(review): this inner NULL check is redundant — we are
		   already inside the outer rx_buffers != NULL branch. */
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}
}

/*
 * Drop a bad or partial frame: free any in-progress chain held in
 * fmp/lmp plus this slot's header/payload mbufs, so the normal
 * refresh path allocates and maps fresh buffers.
 */
static __inline void
igb_rx_discard(struct rx_ring *rxr, int i)
{
	struct igb_rx_buf	*rbuf;

	rbuf = &rxr->rx_buffers[i];

	/* Partially received? Free the chain */
	if (rxr->fmp != NULL) {
		rxr->fmp->m_flags |= M_PKTHDR;
		m_freem(rxr->fmp);
		rxr->fmp = NULL;
		rxr->lmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so its easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}

/*
 * Hand a completed frame to the stack, attempting LRO first when it
 * is enabled and the descriptor type/checksum bits qualify.  The ring
 * lock is dropped around if_input to avoid holding it into the stack.
 */
static __inline void
igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{

	/*
	 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
	 * should be computed by hardware. Also it should not have VLAN tag in
	 * ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
	    (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Send to the stack if:
		 *  - LRO not enabled, or
		 *  - no LRO resources, or
		 *  - lro enqueue fails
		 */
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	IGB_RX_UNLOCK(rxr);
	(*ifp->if_input)(ifp, m);
	IGB_RX_LOCK(rxr);
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 * Return TRUE if more to clean, FALSE otherwise
 *********************************************************************/
static bool
igb_rxeof(struct igb_queue *que, int count, int *done)
{
    struct adapter      *adapter = que->adapter;
    struct rx_ring      *rxr = que->rxr;
    struct ifnet        *ifp = adapter->ifp;
    struct lro_ctrl     *lro = &rxr->lro;
    struct lro_entry    *queued;
    int                 i, processed = 0, rxdone = 0;
    u32                 ptype, staterr = 0;
    union e1000_adv_rx_desc *cur;

    IGB_RX_LOCK(rxr);
    /* Sync the ring. */
    bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef DEV_NETMAP
    /* Netmap mode: hand the ring to netmap and do no work here */
    if (netmap_rx_irq(ifp, rxr->me, &processed)) {
        IGB_RX_UNLOCK(rxr);
        return (FALSE);
    }
#endif /* DEV_NETMAP */

    /* Main clean loop: a negative count never reaches 0, so runs to done */
    for (i = rxr->next_to_check; count != 0;) {
        struct mbuf         *sendmp, *mh, *mp;
        struct igb_rx_buf   *rxbuf;
        u16                 hlen, plen, hdr, vtag;
        bool                eop = FALSE;

        cur = &rxr->rx_base[i];
        staterr = le32toh(cur->wb.upper.status_error);
        if ((staterr & E1000_RXD_STAT_DD) == 0)
            break;
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        count--;
        sendmp = mh = mp = NULL;
        cur->wb.upper.status_error = 0;
        rxbuf = &rxr->rx_buffers[i];
        plen = le16toh(cur->wb.upper.length);
        ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
        /*
        ** NOTE(review): on i350, a loopback (LB) packet carries its
        ** VLAN tag byte-swapped relative to normal write-back; hence
        ** the be16toh here — confirm against the i350 datasheet.
        */
        if ((adapter->hw.mac.type == e1000_i350) &&
            (staterr & E1000_RXDEXT_STATERR_LB))
            vtag = be16toh(cur->wb.upper.vlan);
        else
            vtag = le16toh(cur->wb.upper.vlan);
        hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
        eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

        /* Make sure all segments of a bad packet are discarded */
        if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) ||
            (rxr->discard)) {
            adapter->dropped_pkts++;
            ++rxr->rx_discarded;
            if (!eop) /* Catch subsequent segs */
                rxr->discard = TRUE;
            else
                rxr->discard = FALSE;
            igb_rx_discard(rxr, i);
            goto next_desc;
        }

        /*
        ** The way the hardware is configured to
        ** split, it will ONLY use the header buffer
        ** when header split is enabled, otherwise we
        ** get normal behavior, ie, both header and
        ** payload are DMA'd into the payload buffer.
        **
        ** The fmp test is to catch the case where a
        ** packet spans multiple descriptors, in that
        ** case only the first header is valid.
        */
        if (rxr->hdr_split && rxr->fmp == NULL) {
            hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
                E1000_RXDADV_HDRBUFLEN_SHIFT;
            if (hlen > IGB_HDR_BUF)
                hlen = IGB_HDR_BUF;
            mh = rxr->rx_buffers[i].m_head;
            mh->m_len = hlen;
            /* clear buf pointer for refresh */
            rxbuf->m_head = NULL;
            /*
            ** Get the payload length, this
            ** could be zero if its a small
            ** packet.
            */
            if (plen > 0) {
                mp = rxr->rx_buffers[i].m_pack;
                mp->m_len = plen;
                mh->m_next = mp;
                /* clear buf pointer */
                rxbuf->m_pack = NULL;
                rxr->rx_split_packets++;
            }
        } else {
            /*
            ** Either no header split, or a
            ** secondary piece of a fragmented
            ** split packet.
            */
            mh = rxr->rx_buffers[i].m_pack;
            mh->m_len = plen;
            /* clear buf info for refresh */
            rxbuf->m_pack = NULL;
        }

        ++processed; /* So we know when to refresh */

        /* Initial frame - setup */
        if (rxr->fmp == NULL) {
            mh->m_pkthdr.len = mh->m_len;
            /* Save the head of the chain */
            rxr->fmp = mh;
            rxr->lmp = mh;
            if (mp != NULL) {
                /* Add payload if split */
                mh->m_pkthdr.len += mp->m_len;
                rxr->lmp = mh->m_next;
            }
        } else {
            /* Chain mbuf's together */
            rxr->lmp->m_next = mh;
            rxr->lmp = rxr->lmp->m_next;
            rxr->fmp->m_pkthdr.len += mh->m_len;
        }

        /* End of packet: finalize headers/flags and pass it up */
        if (eop) {
            rxr->fmp->m_pkthdr.rcvif = ifp;
            ifp->if_ipackets++;
            rxr->rx_packets++;
            /* capture data for AIM */
            rxr->packets++;
            rxr->bytes += rxr->fmp->m_pkthdr.len;
            rxr->rx_bytes += rxr->fmp->m_pkthdr.len;

            if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                igb_rx_checksum(staterr, rxr->fmp, ptype);

            if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
                (staterr & E1000_RXD_STAT_VP) != 0) {
                rxr->fmp->m_pkthdr.ether_vtag = vtag;
                rxr->fmp->m_flags |= M_VLANTAG;
            }
#ifndef IGB_LEGACY_TX
            /* Tag the packet with its queue for multiqueue dispatch */
            rxr->fmp->m_pkthdr.flowid = que->msix;
            rxr->fmp->m_flags |= M_FLOWID;
#endif
            sendmp = rxr->fmp;
            /* Make sure to set M_PKTHDR. */
            sendmp->m_flags |= M_PKTHDR;
            rxr->fmp = NULL;
            rxr->lmp = NULL;
        }

next_desc:
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Advance our pointers to the next descriptor. */
        if (++i == adapter->num_rx_desc)
            i = 0;
        /*
        ** Send to the stack or LRO
        */
        if (sendmp != NULL) {
            /*
            ** igb_rx_input drops/retakes the RX lock, so stash and
            ** reload next_to_check around the call.
            */
            rxr->next_to_check = i;
            igb_rx_input(rxr, ifp, sendmp, ptype);
            i = rxr->next_to_check;
            rxdone++;
        }

        /* Every 8 descriptors we go to refresh mbufs */
        if (processed == 8) {
            igb_refresh_mbufs(rxr, i);
            processed = 0;
        }
    }

    /* Catch any remainders */
    if (igb_rx_unrefreshed(rxr))
        igb_refresh_mbufs(rxr, i);

    rxr->next_to_check = i;

    /*
     * Flush any outstanding LRO work
     */
    while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
        SLIST_REMOVE_HEAD(&lro->lro_active, next);
        tcp_lro_flush(lro, queued);
    }

    if (done != NULL)
        *done += rxdone;

    IGB_RX_UNLOCK(rxr);
    /* DD still set on the last peeked descriptor => more work pending */
    return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
    u16 status = (u16)staterr;
    u8  errors = (u8) (staterr >> 24);
    int sctp;

    /* Ignore Checksum bit is set */
    if (status & E1000_RXD_STAT_IXSM) {
        mp->m_pkthdr.csum_flags = 0;
        return;
    }

    /* SCTP packets get a different "valid" flag below */
    if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
        (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
        sctp = 1;
    else
        sctp = 0;
    if (status & E1000_RXD_STAT_IPCS) {
        /* Did it pass? */
        if (!(errors & E1000_RXD_ERR_IPE)) {
            /* IP Checksum Good */
            mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
            mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
        } else
            mp->m_pkthdr.csum_flags = 0;
    }

    if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
        u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
        if (sctp) /* reassign */
            type = CSUM_SCTP_VALID;
#endif
        /* Did it pass?
 */
        if (!(errors & E1000_RXD_ERR_TCPE)) {
            mp->m_pkthdr.csum_flags |= type;
            /* SCTP valid flag carries no pseudo-header csum_data */
            if (sctp == 0)
                mp->m_pkthdr.csum_data = htons(0xffff);
        }
    }
    return;
}

/*
 * This routine is run via an vlan
 * config EVENT
 */
static void
igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter  *adapter = ifp->if_softc;
    u32             index, bit;

    if (ifp->if_softc != arg)   /* Not our event */
        return;

    if ((vtag == 0) || (vtag > 4095))       /* Invalid */
        return;

    IGB_CORE_LOCK(adapter);
    /* shadow_vfta is an array of 32-bit words: word index, bit in word */
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    adapter->shadow_vfta[index] |= (1 << bit);
    ++adapter->num_vlans;
    /* Change hw filter setting */
    if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
        igb_setup_vlan_hw_support(adapter);
    IGB_CORE_UNLOCK(adapter);
}

/*
 * This routine is run via an vlan
 * unconfig EVENT
 */
static void
igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter  *adapter = ifp->if_softc;
    u32             index, bit;

    if (ifp->if_softc != arg)   /* Not our event */
        return;

    if ((vtag == 0) || (vtag > 4095))       /* Invalid */
        return;

    IGB_CORE_LOCK(adapter);
    /* Clear the tag's bit in the shadow VLAN filter table */
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    adapter->shadow_vfta[index] &= ~(1 << bit);
    --adapter->num_vlans;
    /* Change
 hw filter setting */
    if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
        igb_setup_vlan_hw_support(adapter);
    IGB_CORE_UNLOCK(adapter);
}

/*
 * Program VLAN offload/filtering state into the hardware:
 * VME tag stripping, the VLAN filter table, and max frame size.
 */
static void
igb_setup_vlan_hw_support(struct adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    struct ifnet    *ifp = adapter->ifp;
    u32             reg;

    /* VF interfaces can only update the max frame length */
    if (adapter->vf_ifp) {
        e1000_rlpml_set_vf(hw,
            adapter->max_frame_size + VLAN_TAG_SIZE);
        return;
    }

    /* Enable VLAN tag stripping (VME) */
    reg = E1000_READ_REG(hw, E1000_CTRL);
    reg |= E1000_CTRL_VME;
    E1000_WRITE_REG(hw, E1000_CTRL, reg);

    /* Enable the Filter Table */
    if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
        reg = E1000_READ_REG(hw, E1000_RCTL);
        reg &= ~E1000_RCTL_CFIEN;
        reg |= E1000_RCTL_VFE;
        E1000_WRITE_REG(hw, E1000_RCTL, reg);
    }

    /* Update the frame size */
    E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
        adapter->max_frame_size + VLAN_TAG_SIZE);

    /* Don't bother with table if no vlans */
    if ((adapter->num_vlans == 0) ||
        ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
        return;
    /*
    ** A soft reset zero's out the VFTA, so
    ** we need to repopulate it now.
 */
    for (int i = 0; i < IGB_VFTA_SIZE; i++)
        if (adapter->shadow_vfta[i] != 0) {
            if (adapter->vf_ifp)
                e1000_vfta_set_vf(hw,
                    adapter->shadow_vfta[i], TRUE);
            else
                e1000_write_vfta(hw,
                    i, adapter->shadow_vfta[i]);
        }
}

/*
 * Unmask device interrupts; with MSIX, queue interrupts are set to
 * auto-clear/auto-mask (EIAC/EIAM) and the link interrupt is enabled
 * separately via IMS.
 */
static void
igb_enable_intr(struct adapter *adapter)
{
    /* With RSS set up what to auto clear */
    if (adapter->msix_mem) {
        u32 mask = (adapter->que_mask | adapter->link_mask);
        E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask);
        E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask);
        E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask);
        E1000_WRITE_REG(&adapter->hw, E1000_IMS,
            E1000_IMS_LSC);
    } else {
        E1000_WRITE_REG(&adapter->hw, E1000_IMS,
            IMS_ENABLE_MASK);
    }
    E1000_WRITE_FLUSH(&adapter->hw);

    return;
}

/*
 * Mask all device interrupts (both extended/MSIX and legacy sets).
 */
static void
igb_disable_intr(struct adapter *adapter)
{
    if (adapter->msix_mem) {
        E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
        E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
    }
    E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
    E1000_WRITE_FLUSH(&adapter->hw);
    return;
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system...
aka
 * to disable special hardware management features
 */
static void
igb_init_manageability(struct adapter *adapter)
{
    if (adapter->has_manage) {
        int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
        int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

        /* disable hardware interception of ARP */
        manc &= ~(E1000_MANC_ARP_EN);

        /* enable receiving management packets to the host */
        manc |= E1000_MANC_EN_MNG2HOST;
        manc2h |= 1 << 5;  /* Mng Port 623 */
        manc2h |= 1 << 6;  /* Mng Port 664 */
        E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
        E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
    }
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
igb_release_manageability(struct adapter *adapter)
{
    if (adapter->has_manage) {
        int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

        /* re-enable hardware interception of ARP */
        manc |= E1000_MANC_ARP_EN;
        manc &= ~E1000_MANC_EN_MNG2HOST;

        /* NOTE(review): MANC2H port bits set above are left as-is here */
        E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
    }
}

/*
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 */
static void
igb_get_hw_control(struct adapter *adapter)
{
    u32 ctrl_ext;

    /* VFs have no DRV_LOAD handshake with firmware */
    if (adapter->vf_ifp)
        return;

    /* Let firmware know the driver has taken over */
    ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
    E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
        ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 */
static void
igb_release_hw_control(struct adapter *adapter)
{
    u32 ctrl_ext;

    if (adapter->vf_ifp)
        return;

    /* Let firmware taken over control of h/w */
    ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
    E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
        ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/*
 * Sanity check an Ethernet address: reject multicast/broadcast
 * (group bit set) and the all-zeros address.
 */
static int
igb_is_valid_ether_addr(uint8_t *addr)
{
    char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

    if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
        return (FALSE);
    }

    return (TRUE);
}


/*
 * Enable PCI Wake On Lan capability
 */
static void
igb_enable_wakeup(device_t dev)
{
    u16     cap, status;
    u8      id;

    /* First find the capabilities pointer*/
    cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
    /* Read the PM Capabilities */
    id = pci_read_config(dev, cap, 1);
    /*
    ** NOTE(review): this assumes the Power Management capability is
    ** the FIRST entry in the capability list; it does not walk the
    ** chain (cf. pci_find_cap(9)) — confirm this holds on all
    ** supported parts, otherwise WoL silently stays disabled.
    */
    if (id != PCIY_PMG)     /* Something wrong */
        return;
    /* OK, we have the power capabilities, so
       now get the status register */
    cap += PCIR_POWER_STATUS;
    status = pci_read_config(dev, cap, 2);
    status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
    pci_write_config(dev, cap, status, 2);
    return;
}

/*
 * led(4) callback: drive the identify LED on or off.
 */
static void
igb_led_func(void *arg, int onoff)
{
    struct adapter  *adapter = arg;

    IGB_CORE_LOCK(adapter);
    if (onoff) {
        e1000_setup_led(&adapter->hw);
        e1000_led_on(&adapter->hw);
    } else {
        e1000_led_off(&adapter->hw);
        e1000_cleanup_led(&adapter->hw);
    }
    IGB_CORE_UNLOCK(adapter);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
igb_update_stats_counters(struct adapter *adapter)
{
    struct ifnet            *ifp;
    struct e1000_hw         *hw = &adapter->hw;
    struct e1000_hw_stats   *stats;

    /*
    ** The virtual function adapter has only a
    ** small controlled set of stats, do only
    ** those and return.
 */
    if (adapter->vf_ifp) {
        igb_update_vf_stats_counters(adapter);
        return;
    }

    stats = (struct e1000_hw_stats *)adapter->stats;

    /* Symbol/sequence errors are only meaningful with link (or copper) */
    if(adapter->hw.phy.media_type == e1000_media_type_copper ||
       (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
        stats->symerrs +=
            E1000_READ_REG(hw,E1000_SYMERRS);
        stats->sec += E1000_READ_REG(hw, E1000_SEC);
    }

    stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
    stats->mpc += E1000_READ_REG(hw, E1000_MPC);
    stats->scc += E1000_READ_REG(hw, E1000_SCC);
    stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

    stats->mcc += E1000_READ_REG(hw, E1000_MCC);
    stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
    stats->colc += E1000_READ_REG(hw, E1000_COLC);
    stats->dc += E1000_READ_REG(hw, E1000_DC);
    stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
    stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
    stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
    /*
    ** For watchdog management we need to know if we have been
    ** paused during the last interval, so capture that here.
    */
    adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
    stats->xoffrxc += adapter->pause_frames;
    stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
    stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
    stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
    stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
    stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
    stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
    stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
    stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
    stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
    stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
    stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
    stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

    /* For the 64-bit byte counters the low dword must be read first. */
    /* Both registers clear on the read of the high dword */

    stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
        ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32);
    stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
        ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32);

    stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
    stats->ruc += E1000_READ_REG(hw, E1000_RUC);
    stats->rfc += E1000_READ_REG(hw, E1000_RFC);
    stats->roc += E1000_READ_REG(hw, E1000_ROC);
    stats->rjc += E1000_READ_REG(hw, E1000_RJC);

    /*
    ** NOTE(review): only the HIGH dwords of TOR/TOT are accumulated
    ** here; per the comment above, the low dword should be read first
    ** and both contribute to a 64-bit total — confirm against the
    ** datasheet whether TORL/TOTL are intentionally discarded.
    */
    stats->tor += E1000_READ_REG(hw, E1000_TORH);
    stats->tot += E1000_READ_REG(hw, E1000_TOTH);

    stats->tpr += E1000_READ_REG(hw, E1000_TPR);
    stats->tpt += E1000_READ_REG(hw, E1000_TPT);
    stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
    stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
    stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
    stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
    stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
    stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
    stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
    stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

    /* Interrupt Counts */

    stats->iac += E1000_READ_REG(hw, E1000_IAC);
    stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
    stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
    stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
    stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
    stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
    stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
    stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
    stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

    /* Host to Card Statistics */

    stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
    stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
    stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
    stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
    stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
    stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
    stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
    stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
        ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32));
    stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
        ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
    stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
    stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
    stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

    stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
    stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
    stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
    stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
    stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
    stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

    /* Propagate aggregate counters to the ifnet */
    ifp = adapter->ifp;
    ifp->if_collisions = stats->colc;

    /* Rx Errors */
    ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc +
        stats->crcerrs + stats->algnerrc +
        stats->ruc + stats->roc + stats->mpc + stats->cexterr;

    /* Tx Errors */
    ifp->if_oerrors = stats->ecol +
        stats->latecol + adapter->watchdog_events;

    /* Driver specific counters */
    adapter->device_control = E1000_READ_REG(hw, E1000_CTRL);
    adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL);
    adapter->int_mask = E1000_READ_REG(hw, E1000_IMS);
    adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
    adapter->packet_buf_alloc_tx =
        ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
    adapter->packet_buf_alloc_rx =
        (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
}


/**********************************************************************
 *
 *  Initialize the VF board statistics counters.
 *
 **********************************************************************/
static void
igb_vf_init_stats(struct adapter *adapter)
{
    struct e1000_hw         *hw = &adapter->hw;
    struct e1000_vf_stats   *stats;

    stats = (struct e1000_vf_stats *)adapter->stats;
    if (stats == NULL)
        return;
    /* Seed the "last" snapshots used for delta accumulation */
    stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
    stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
    stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
    stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
    stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
}

/**********************************************************************
 *
 *  Update the VF board statistics counters.
 *
 **********************************************************************/
static void
igb_update_vf_stats_counters(struct adapter *adapter)
{
    struct e1000_hw         *hw = &adapter->hw;
    struct e1000_vf_stats   *stats;

    /* No link yet: counters are not running */
    if (adapter->link_speed == 0)
        return;

    stats = (struct e1000_vf_stats *)adapter->stats;

    /*
    ** NOTE(review): UPDATE_VF_REG (defined elsewhere) presumably
    ** accumulates the delta from the saved "last_*" snapshot and
    ** handles 32-bit counter wrap — confirm at the macro definition.
    */
    UPDATE_VF_REG(E1000_VFGPRC,
        stats->last_gprc, stats->gprc);
    UPDATE_VF_REG(E1000_VFGORC,
        stats->last_gorc, stats->gorc);
    UPDATE_VF_REG(E1000_VFGPTC,
        stats->last_gptc, stats->gptc);
    UPDATE_VF_REG(E1000_VFGOTC,
        stats->last_gotc, stats->gotc);
    UPDATE_VF_REG(E1000_VFMPRC,
        stats->last_mprc, stats->mprc);
}

/* Export a single 32-bit register via a read-only sysctl.
 */
static int
igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
{
    struct adapter *adapter;
    u_int val;

    /* arg1 = adapter, arg2 = register offset to read */
    adapter = oidp->oid_arg1;
    val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
    return (sysctl_handle_int(oidp, &val, 0, req));
}

/*
** Tuneable interrupt rate handler
*/
static int
igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
    struct igb_queue    *que = ((struct igb_queue *)oidp->oid_arg1);
    int                 error;
    u32                 reg, usec, rate;

    /* EITR holds the interval in 4-usec units in bits 14:2 */
    reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix));
    usec = ((reg & 0x7FFC) >> 2);
    if (usec > 0)
        rate = 1000000 / usec;
    else
        rate = 0;
    error = sysctl_handle_int(oidp, &rate, 0, req);
    if (error || !req->newptr)
        return error;
    /*
    ** NOTE(review): despite being described as tuneable, a value
    ** written by the user is accepted here but never programmed
    ** back into EITR — the new rate has no effect. TODO confirm
    ** whether a write-back was intended.
    */
    return 0;
}

/*
 * Add sysctl variables, one per statistic, to the system.
5507209241Sgnn */ 5508176667Sjfvstatic void 5509209241Sgnnigb_add_hw_stats(struct adapter *adapter) 5510176667Sjfv{ 5511176667Sjfv device_t dev = adapter->dev; 5512209241Sgnn 5513209241Sgnn struct tx_ring *txr = adapter->tx_rings; 5514176667Sjfv struct rx_ring *rxr = adapter->rx_rings; 5515176667Sjfv 5516209241Sgnn struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5517209241Sgnn struct sysctl_oid *tree = device_get_sysctl_tree(dev); 5518209241Sgnn struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 5519209616Sjfv struct e1000_hw_stats *stats = adapter->stats; 5520178523Sjfv 5521209241Sgnn struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node; 5522209241Sgnn struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list; 5523178523Sjfv 5524209241Sgnn#define QUEUE_NAME_LEN 32 5525209241Sgnn char namebuf[QUEUE_NAME_LEN]; 5526176667Sjfv 5527209241Sgnn /* Driver Statistics */ 5528219753Sjfv SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq", 5529209241Sgnn CTLFLAG_RD, &adapter->link_irq, 0, 5530209241Sgnn "Link MSIX IRQ Handled"); 5531209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 5532209241Sgnn CTLFLAG_RD, &adapter->dropped_pkts, 5533209241Sgnn "Driver dropped packets"); 5534209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 5535209241Sgnn CTLFLAG_RD, &adapter->no_tx_dma_setup, 5536209241Sgnn "Driver tx dma failure in xmit"); 5537212902Sjhb SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", 5538212902Sjhb CTLFLAG_RD, &adapter->rx_overruns, 5539212902Sjhb "RX overruns"); 5540212902Sjhb SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", 5541212902Sjhb CTLFLAG_RD, &adapter->watchdog_events, 5542212902Sjhb "Watchdog timeouts"); 5543209241Sgnn 5544209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", 5545209241Sgnn CTLFLAG_RD, &adapter->device_control, 5546209241Sgnn "Device Control Register"); 5547209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", 5548209241Sgnn CTLFLAG_RD, 
&adapter->rx_control, 5549209241Sgnn "Receiver Control Register"); 5550209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", 5551209241Sgnn CTLFLAG_RD, &adapter->int_mask, 5552209241Sgnn "Interrupt Mask"); 5553209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", 5554209241Sgnn CTLFLAG_RD, &adapter->eint_mask, 5555209241Sgnn "Extended Interrupt Mask"); 5556209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", 5557209241Sgnn CTLFLAG_RD, &adapter->packet_buf_alloc_tx, 5558209241Sgnn "Transmit Buffer Packet Allocation"); 5559209241Sgnn SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", 5560209241Sgnn CTLFLAG_RD, &adapter->packet_buf_alloc_rx, 5561209241Sgnn "Receive Buffer Packet Allocation"); 5562209241Sgnn SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", 5563209241Sgnn CTLFLAG_RD, &adapter->hw.fc.high_water, 0, 5564209241Sgnn "Flow Control High Watermark"); 5565209241Sgnn SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 5566209241Sgnn CTLFLAG_RD, &adapter->hw.fc.low_water, 0, 5567209241Sgnn "Flow Control Low Watermark"); 5568209241Sgnn 5569212902Sjhb for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 5570212902Sjhb struct lro_ctrl *lro = &rxr->lro; 5571212902Sjhb 5572209241Sgnn snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 5573209241Sgnn queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 5574209241Sgnn CTLFLAG_RD, NULL, "Queue Name"); 5575209241Sgnn queue_list = SYSCTL_CHILDREN(queue_node); 5576209241Sgnn 5577215781Sjfv SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 5578219753Sjfv CTLFLAG_RD, &adapter->queues[i], 5579215781Sjfv sizeof(&adapter->queues[i]), 5580215781Sjfv igb_sysctl_interrupt_rate_handler, 5581215781Sjfv "IU", "Interrupt Rate"); 5582215781Sjfv 5583210428Sgnn SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 5584219753Sjfv CTLFLAG_RD, adapter, E1000_TDH(txr->me), 5585212902Sjhb igb_sysctl_reg_handler, "IU", 5586210428Sgnn "Transmit Descriptor Head"); 
5587210428Sgnn SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 5588219753Sjfv CTLFLAG_RD, adapter, E1000_TDT(txr->me), 5589212902Sjhb igb_sysctl_reg_handler, "IU", 5590210428Sgnn "Transmit Descriptor Tail"); 5591219753Sjfv SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", 5592209241Sgnn CTLFLAG_RD, &txr->no_desc_avail, 5593209241Sgnn "Queue No Descriptor Available"); 5594219753Sjfv SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", 5595209241Sgnn CTLFLAG_RD, &txr->tx_packets, 5596209241Sgnn "Queue Packets Transmitted"); 5597176667Sjfv 5598210428Sgnn SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 5599219753Sjfv CTLFLAG_RD, adapter, E1000_RDH(rxr->me), 5600212902Sjhb igb_sysctl_reg_handler, "IU", 5601209241Sgnn "Receive Descriptor Head"); 5602210428Sgnn SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 5603219753Sjfv CTLFLAG_RD, adapter, E1000_RDT(rxr->me), 5604212902Sjhb igb_sysctl_reg_handler, "IU", 5605209241Sgnn "Receive Descriptor Tail"); 5606219753Sjfv SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", 5607209241Sgnn CTLFLAG_RD, &rxr->rx_packets, 5608209241Sgnn "Queue Packets Received"); 5609219753Sjfv SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 5610209241Sgnn CTLFLAG_RD, &rxr->rx_bytes, 5611209241Sgnn "Queue Bytes Received"); 5612219753Sjfv SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued", 5613209241Sgnn CTLFLAG_RD, &lro->lro_queued, 0, 5614209241Sgnn "LRO Queued"); 5615219753Sjfv SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed", 5616209241Sgnn CTLFLAG_RD, &lro->lro_flushed, 0, 5617209241Sgnn "LRO Flushed"); 5618176667Sjfv } 5619181027Sjfv 5620212902Sjhb /* MAC stats get their own sub node */ 5621203049Sjfv 5622209241Sgnn stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 5623209241Sgnn CTLFLAG_RD, NULL, "MAC Statistics"); 5624209241Sgnn stat_list = SYSCTL_CHILDREN(stat_node); 5625176667Sjfv 5626209611Sjfv /* 5627209611Sjfv ** VF adapter has a very limited set of stats 
5628209611Sjfv ** since its not managing the metal, so to speak. 5629209611Sjfv */ 5630218530Sjfv if (adapter->vf_ifp) { 5631219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", 5632209616Sjfv CTLFLAG_RD, &stats->gprc, 5633209611Sjfv "Good Packets Received"); 5634219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 5635209616Sjfv CTLFLAG_RD, &stats->gptc, 5636209611Sjfv "Good Packets Transmitted"); 5637219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 5638209616Sjfv CTLFLAG_RD, &stats->gorc, 5639209611Sjfv "Good Octets Received"); 5640219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 5641209616Sjfv CTLFLAG_RD, &stats->gotc, 5642212902Sjhb "Good Octets Transmitted"); 5643219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", 5644209616Sjfv CTLFLAG_RD, &stats->mprc, 5645209611Sjfv "Multicast Packets Received"); 5646209611Sjfv return; 5647209611Sjfv } 5648209611Sjfv 5649219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", 5650209241Sgnn CTLFLAG_RD, &stats->ecol, 5651209241Sgnn "Excessive collisions"); 5652219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", 5653209241Sgnn CTLFLAG_RD, &stats->scc, 5654209241Sgnn "Single collisions"); 5655219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 5656209241Sgnn CTLFLAG_RD, &stats->mcc, 5657209241Sgnn "Multiple collisions"); 5658219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", 5659209241Sgnn CTLFLAG_RD, &stats->latecol, 5660209241Sgnn "Late collisions"); 5661219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", 5662209241Sgnn CTLFLAG_RD, &stats->colc, 5663209241Sgnn "Collision Count"); 5664219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors", 5665209616Sjfv CTLFLAG_RD, &stats->symerrs, 5666209241Sgnn "Symbol Errors"); 5667219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors", 5668209616Sjfv CTLFLAG_RD, &stats->sec, 
5669209241Sgnn "Sequence Errors"); 5670219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count", 5671209616Sjfv CTLFLAG_RD, &stats->dc, 5672209241Sgnn "Defer Count"); 5673219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets", 5674209616Sjfv CTLFLAG_RD, &stats->mpc, 5675209241Sgnn "Missed Packets"); 5676219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", 5677209616Sjfv CTLFLAG_RD, &stats->rnbc, 5678209241Sgnn "Receive No Buffers"); 5679219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize", 5680209616Sjfv CTLFLAG_RD, &stats->ruc, 5681209241Sgnn "Receive Undersize"); 5682219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 5683209616Sjfv CTLFLAG_RD, &stats->rfc, 5684209241Sgnn "Fragmented Packets Received "); 5685219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize", 5686209616Sjfv CTLFLAG_RD, &stats->roc, 5687209241Sgnn "Oversized Packets Received"); 5688219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber", 5689209616Sjfv CTLFLAG_RD, &stats->rjc, 5690209241Sgnn "Recevied Jabber"); 5691219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs", 5692209616Sjfv CTLFLAG_RD, &stats->rxerrc, 5693209241Sgnn "Receive Errors"); 5694219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs", 5695209616Sjfv CTLFLAG_RD, &stats->crcerrs, 5696209241Sgnn "CRC errors"); 5697219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs", 5698209616Sjfv CTLFLAG_RD, &stats->algnerrc, 5699209241Sgnn "Alignment Errors"); 5700176667Sjfv /* On 82575 these are collision counts */ 5701219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", 5702209616Sjfv CTLFLAG_RD, &stats->cexterr, 5703209241Sgnn "Collision/Carrier extension errors"); 5704219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 5705209616Sjfv CTLFLAG_RD, &stats->xonrxc, 5706209241Sgnn "XON Received"); 5707219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd", 
5708209616Sjfv CTLFLAG_RD, &stats->xontxc, 5709209241Sgnn "XON Transmitted"); 5710219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 5711209616Sjfv CTLFLAG_RD, &stats->xoffrxc, 5712209241Sgnn "XOFF Received"); 5713219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 5714209616Sjfv CTLFLAG_RD, &stats->xofftxc, 5715209241Sgnn "XOFF Transmitted"); 5716209241Sgnn /* Packet Reception Stats */ 5717219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", 5718209616Sjfv CTLFLAG_RD, &stats->tpr, 5719209241Sgnn "Total Packets Received "); 5720219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", 5721209616Sjfv CTLFLAG_RD, &stats->gprc, 5722209241Sgnn "Good Packets Received"); 5723219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", 5724209616Sjfv CTLFLAG_RD, &stats->bprc, 5725209241Sgnn "Broadcast Packets Received"); 5726219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", 5727209616Sjfv CTLFLAG_RD, &stats->mprc, 5728209241Sgnn "Multicast Packets Received"); 5729219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 5730209616Sjfv CTLFLAG_RD, &stats->prc64, 5731209241Sgnn "64 byte frames received "); 5732219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 5733209616Sjfv CTLFLAG_RD, &stats->prc127, 5734209241Sgnn "65-127 byte frames received"); 5735219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 5736209616Sjfv CTLFLAG_RD, &stats->prc255, 5737209241Sgnn "128-255 byte frames received"); 5738219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 5739209616Sjfv CTLFLAG_RD, &stats->prc511, 5740209241Sgnn "256-511 byte frames received"); 5741219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 5742209616Sjfv CTLFLAG_RD, &stats->prc1023, 5743209241Sgnn "512-1023 byte frames received"); 5744219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 5745209616Sjfv 
CTLFLAG_RD, &stats->prc1522, 5746209241Sgnn "1023-1522 byte frames received"); 5747219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 5748209616Sjfv CTLFLAG_RD, &stats->gorc, 5749209241Sgnn "Good Octets Received"); 5750209241Sgnn 5751209241Sgnn /* Packet Transmission Stats */ 5752219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 5753209616Sjfv CTLFLAG_RD, &stats->gotc, 5754212902Sjhb "Good Octets Transmitted"); 5755219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 5756209616Sjfv CTLFLAG_RD, &stats->tpt, 5757209241Sgnn "Total Packets Transmitted"); 5758219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 5759209616Sjfv CTLFLAG_RD, &stats->gptc, 5760209241Sgnn "Good Packets Transmitted"); 5761219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 5762209616Sjfv CTLFLAG_RD, &stats->bptc, 5763209241Sgnn "Broadcast Packets Transmitted"); 5764219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 5765209616Sjfv CTLFLAG_RD, &stats->mptc, 5766209241Sgnn "Multicast Packets Transmitted"); 5767219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 5768209616Sjfv CTLFLAG_RD, &stats->ptc64, 5769209241Sgnn "64 byte frames transmitted "); 5770219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 5771209616Sjfv CTLFLAG_RD, &stats->ptc127, 5772209241Sgnn "65-127 byte frames transmitted"); 5773219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 5774209616Sjfv CTLFLAG_RD, &stats->ptc255, 5775209241Sgnn "128-255 byte frames transmitted"); 5776219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 5777209616Sjfv CTLFLAG_RD, &stats->ptc511, 5778209241Sgnn "256-511 byte frames transmitted"); 5779219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 5780209616Sjfv CTLFLAG_RD, &stats->ptc1023, 5781209241Sgnn "512-1023 byte frames transmitted"); 5782219753Sjfv SYSCTL_ADD_QUAD(ctx, 
stat_list, OID_AUTO, "tx_frames_1024_1522", 5783209616Sjfv CTLFLAG_RD, &stats->ptc1522, 5784209241Sgnn "1024-1522 byte frames transmitted"); 5785219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd", 5786209616Sjfv CTLFLAG_RD, &stats->tsctc, 5787209241Sgnn "TSO Contexts Transmitted"); 5788219753Sjfv SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", 5789209616Sjfv CTLFLAG_RD, &stats->tsctfc, 5790209241Sgnn "TSO Contexts Failed"); 5791209241Sgnn 5792209241Sgnn 5793209241Sgnn /* Interrupt Stats */ 5794209241Sgnn 5795209241Sgnn int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", 5796209241Sgnn CTLFLAG_RD, NULL, "Interrupt Statistics"); 5797209241Sgnn int_list = SYSCTL_CHILDREN(int_node); 5798209241Sgnn 5799219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts", 5800209616Sjfv CTLFLAG_RD, &stats->iac, 5801209241Sgnn "Interrupt Assertion Count"); 5802209241Sgnn 5803219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", 5804209616Sjfv CTLFLAG_RD, &stats->icrxptc, 5805209241Sgnn "Interrupt Cause Rx Pkt Timer Expire Count"); 5806209241Sgnn 5807219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", 5808209616Sjfv CTLFLAG_RD, &stats->icrxatc, 5809209241Sgnn "Interrupt Cause Rx Abs Timer Expire Count"); 5810209241Sgnn 5811219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", 5812209616Sjfv CTLFLAG_RD, &stats->ictxptc, 5813209241Sgnn "Interrupt Cause Tx Pkt Timer Expire Count"); 5814209241Sgnn 5815219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", 5816209616Sjfv CTLFLAG_RD, &stats->ictxatc, 5817209241Sgnn "Interrupt Cause Tx Abs Timer Expire Count"); 5818209241Sgnn 5819219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", 5820209616Sjfv CTLFLAG_RD, &stats->ictxqec, 5821209241Sgnn "Interrupt Cause Tx Queue Empty Count"); 5822209241Sgnn 5823219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", 5824209616Sjfv CTLFLAG_RD, &stats->ictxqmtc, 5825209241Sgnn 
"Interrupt Cause Tx Queue Min Thresh Count"); 5826209241Sgnn 5827219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", 5828209616Sjfv CTLFLAG_RD, &stats->icrxdmtc, 5829209241Sgnn "Interrupt Cause Rx Desc Min Thresh Count"); 5830209241Sgnn 5831219753Sjfv SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun", 5832209616Sjfv CTLFLAG_RD, &stats->icrxoc, 5833209241Sgnn "Interrupt Cause Receiver Overrun Count"); 5834209241Sgnn 5835209241Sgnn /* Host to Card Stats */ 5836209241Sgnn 5837209241Sgnn host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", 5838209241Sgnn CTLFLAG_RD, NULL, 5839209241Sgnn "Host to Card Statistics"); 5840209241Sgnn 5841209241Sgnn host_list = SYSCTL_CHILDREN(host_node); 5842209241Sgnn 5843219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt", 5844209616Sjfv CTLFLAG_RD, &stats->cbtmpc, 5845209241Sgnn "Circuit Breaker Tx Packet Count"); 5846209241Sgnn 5847219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard", 5848209616Sjfv CTLFLAG_RD, &stats->htdpmc, 5849209241Sgnn "Host Transmit Discarded Packets"); 5850209241Sgnn 5851219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt", 5852209616Sjfv CTLFLAG_RD, &stats->rpthc, 5853209241Sgnn "Rx Packets To Host"); 5854209241Sgnn 5855219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts", 5856209616Sjfv CTLFLAG_RD, &stats->cbrmpc, 5857209241Sgnn "Circuit Breaker Rx Packet Count"); 5858209241Sgnn 5859219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop", 5860209616Sjfv CTLFLAG_RD, &stats->cbrdpc, 5861209241Sgnn "Circuit Breaker Rx Dropped Count"); 5862209241Sgnn 5863219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt", 5864209616Sjfv CTLFLAG_RD, &stats->hgptc, 5865209241Sgnn "Host Good Packets Tx Count"); 5866209241Sgnn 5867219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop", 5868209616Sjfv CTLFLAG_RD, &stats->htcbdpc, 5869209241Sgnn "Host Tx Circuit Breaker Dropped Count"); 
5870209241Sgnn 5871219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes", 5872209616Sjfv CTLFLAG_RD, &stats->hgorc, 5873209241Sgnn "Host Good Octets Received Count"); 5874209241Sgnn 5875219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes", 5876209616Sjfv CTLFLAG_RD, &stats->hgotc, 5877209241Sgnn "Host Good Octets Transmit Count"); 5878209241Sgnn 5879219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors", 5880209616Sjfv CTLFLAG_RD, &stats->lenerrs, 5881209241Sgnn "Length Errors"); 5882209241Sgnn 5883219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt", 5884209616Sjfv CTLFLAG_RD, &stats->scvpc, 5885209241Sgnn "SerDes/SGMII Code Violation Pkt Count"); 5886209241Sgnn 5887219753Sjfv SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed", 5888209616Sjfv CTLFLAG_RD, &stats->hrmpc, 5889209241Sgnn "Header Redirection Missed Packet Count"); 5890209611Sjfv} 5891209241Sgnn 5892209241Sgnn 5893176667Sjfv/********************************************************************** 5894176667Sjfv * 5895176667Sjfv * This routine provides a way to dump out the adapter eeprom, 5896176667Sjfv * often a useful debug/service tool. This only dumps the first 5897176667Sjfv * 32 words, stuff that matters is in that extent. 5898176667Sjfv * 5899176667Sjfv **********************************************************************/ 5900176667Sjfvstatic int 5901209241Sgnnigb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) 5902176667Sjfv{ 5903176667Sjfv struct adapter *adapter; 5904176667Sjfv int error; 5905176667Sjfv int result; 5906176667Sjfv 5907176667Sjfv result = -1; 5908176667Sjfv error = sysctl_handle_int(oidp, &result, 0, req); 5909176667Sjfv 5910176667Sjfv if (error || !req->newptr) 5911176667Sjfv return (error); 5912176667Sjfv 5913176667Sjfv /* 5914176667Sjfv * This value will cause a hex dump of the 5915176667Sjfv * first 32 16-bit words of the EEPROM to 5916176667Sjfv * the screen. 
5917176667Sjfv */ 5918209241Sgnn if (result == 1) { 5919176667Sjfv adapter = (struct adapter *)arg1; 5920176667Sjfv igb_print_nvm_info(adapter); 5921176667Sjfv } 5922176667Sjfv 5923176667Sjfv return (error); 5924176667Sjfv} 5925176667Sjfv 5926209241Sgnnstatic void 5927209241Sgnnigb_print_nvm_info(struct adapter *adapter) 5928176667Sjfv{ 5929209241Sgnn u16 eeprom_data; 5930209241Sgnn int i, j, row = 0; 5931176667Sjfv 5932209241Sgnn /* Its a bit crude, but it gets the job done */ 5933209241Sgnn printf("\nInterface EEPROM Dump:\n"); 5934209241Sgnn printf("Offset\n0x0000 "); 5935209241Sgnn for (i = 0, j = 0; i < 32; i++, j++) { 5936209241Sgnn if (j == 8) { /* Make the offset block */ 5937209241Sgnn j = 0; ++row; 5938209241Sgnn printf("\n0x00%x0 ",row); 5939209241Sgnn } 5940209241Sgnn e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); 5941209241Sgnn printf("%04x ", eeprom_data); 5942176667Sjfv } 5943209241Sgnn printf("\n"); 5944176667Sjfv} 5945176667Sjfv 5946176667Sjfvstatic void 5947219753Sjfvigb_set_sysctl_value(struct adapter *adapter, const char *name, 5948176667Sjfv const char *description, int *limit, int value) 5949176667Sjfv{ 5950176667Sjfv *limit = value; 5951176667Sjfv SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 5952176667Sjfv SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 5953176667Sjfv OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); 5954176667Sjfv} 5955219753Sjfv 5956220375Sjfv/* 5957220375Sjfv** Set flow control using sysctl: 5958220375Sjfv** Flow control values: 5959220375Sjfv** 0 - off 5960220375Sjfv** 1 - rx pause 5961220375Sjfv** 2 - tx pause 5962220375Sjfv** 3 - full 5963220375Sjfv*/ 5964220375Sjfvstatic int 5965220375Sjfvigb_set_flowcntl(SYSCTL_HANDLER_ARGS) 5966220375Sjfv{ 5967235527Sjfv int error; 5968235527Sjfv static int input = 3; /* default is full */ 5969235527Sjfv struct adapter *adapter = (struct adapter *) arg1; 5970220375Sjfv 5971235527Sjfv error = sysctl_handle_int(oidp, &input, 0, req); 5972220375Sjfv 
5973223350Sjfv if ((error) || (req->newptr == NULL)) 5974220375Sjfv return (error); 5975220375Sjfv 5976235527Sjfv switch (input) { 5977220375Sjfv case e1000_fc_rx_pause: 5978220375Sjfv case e1000_fc_tx_pause: 5979220375Sjfv case e1000_fc_full: 5980235527Sjfv case e1000_fc_none: 5981235527Sjfv adapter->hw.fc.requested_mode = input; 5982235527Sjfv adapter->fc = input; 5983220375Sjfv break; 5984220375Sjfv default: 5985235527Sjfv /* Do nothing */ 5986235527Sjfv return (error); 5987220375Sjfv } 5988220375Sjfv 5989220375Sjfv adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; 5990220375Sjfv e1000_force_mac_fc(&adapter->hw); 5991223350Sjfv return (error); 5992220375Sjfv} 5993223350Sjfv 5994223350Sjfv/* 5995223350Sjfv** Manage DMA Coalesce: 5996223350Sjfv** Control values: 5997223350Sjfv** 0/1 - off/on 5998223350Sjfv** Legal timer values are: 5999223350Sjfv** 250,500,1000-10000 in thousands 6000223350Sjfv*/ 6001223350Sjfvstatic int 6002223350Sjfvigb_sysctl_dmac(SYSCTL_HANDLER_ARGS) 6003223350Sjfv{ 6004223350Sjfv struct adapter *adapter = (struct adapter *) arg1; 6005223350Sjfv int error; 6006223350Sjfv 6007223350Sjfv error = sysctl_handle_int(oidp, &adapter->dmac, 0, req); 6008223350Sjfv 6009223350Sjfv if ((error) || (req->newptr == NULL)) 6010223350Sjfv return (error); 6011223350Sjfv 6012223350Sjfv switch (adapter->dmac) { 6013223350Sjfv case 0: 6014223350Sjfv /*Disabling */ 6015223350Sjfv break; 6016223350Sjfv case 1: /* Just enable and use default */ 6017223350Sjfv adapter->dmac = 1000; 6018223350Sjfv break; 6019223350Sjfv case 250: 6020223350Sjfv case 500: 6021223350Sjfv case 1000: 6022223350Sjfv case 2000: 6023223350Sjfv case 3000: 6024223350Sjfv case 4000: 6025223350Sjfv case 5000: 6026223350Sjfv case 6000: 6027223350Sjfv case 7000: 6028223350Sjfv case 8000: 6029223350Sjfv case 9000: 6030223350Sjfv case 10000: 6031223350Sjfv /* Legal values - allow */ 6032223350Sjfv break; 6033223350Sjfv default: 6034223350Sjfv /* Do nothing, illegal value */ 6035223350Sjfv 
adapter->dmac = 0; 6036223350Sjfv return (error); 6037223350Sjfv } 6038223350Sjfv /* Reinit the interface */ 6039223350Sjfv igb_init(adapter); 6040223350Sjfv return (error); 6041223350Sjfv} 6042238262Sjfv 6043238262Sjfv/* 6044238262Sjfv** Manage Energy Efficient Ethernet: 6045238262Sjfv** Control values: 6046238262Sjfv** 0/1 - enabled/disabled 6047238262Sjfv*/ 6048238262Sjfvstatic int 6049238262Sjfvigb_sysctl_eee(SYSCTL_HANDLER_ARGS) 6050238262Sjfv{ 6051238262Sjfv struct adapter *adapter = (struct adapter *) arg1; 6052238262Sjfv int error, value; 6053238262Sjfv 6054238262Sjfv value = adapter->hw.dev_spec._82575.eee_disable; 6055238262Sjfv error = sysctl_handle_int(oidp, &value, 0, req); 6056238262Sjfv if (error || req->newptr == NULL) 6057238262Sjfv return (error); 6058238262Sjfv IGB_CORE_LOCK(adapter); 6059238262Sjfv adapter->hw.dev_spec._82575.eee_disable = (value != 0); 6060238262Sjfv igb_init_locked(adapter); 6061238262Sjfv IGB_CORE_UNLOCK(adapter); 6062238262Sjfv return (0); 6063238262Sjfv} 6064