/* if_em.c — FreeBSD em(4) Intel PRO/1000 gigabit Ethernet driver, revision 161267 */
1/************************************************************************** 2 3Copyright (c) 2001-2006, Intel Corporation 4All rights reserved. 5 6Redistribution and use in source and binary forms, with or without 7modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30POSSIBILITY OF SUCH DAMAGE. 
31 32***************************************************************************/ 33 34/*$FreeBSD: head/sys/dev/em/if_em.c 161267 2006-08-14 02:21:26Z yongari $*/ 35 36#ifdef HAVE_KERNEL_OPTION_HEADERS 37#include "opt_device_polling.h" 38#endif 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/bus.h> 43#include <sys/endian.h> 44#include <sys/kernel.h> 45#include <sys/kthread.h> 46#include <sys/malloc.h> 47#include <sys/mbuf.h> 48#include <sys/module.h> 49#include <sys/rman.h> 50#include <sys/socket.h> 51#include <sys/sockio.h> 52#include <sys/sysctl.h> 53#include <sys/taskqueue.h> 54 55#include <machine/bus.h> 56#include <machine/resource.h> 57 58#include <net/bpf.h> 59#include <net/ethernet.h> 60#include <net/if.h> 61#include <net/if_arp.h> 62#include <net/if_dl.h> 63#include <net/if_media.h> 64 65#include <net/if_types.h> 66#include <net/if_vlan_var.h> 67 68#include <netinet/in_systm.h> 69#include <netinet/in.h> 70#include <netinet/if_ether.h> 71#include <netinet/ip.h> 72#include <netinet/tcp.h> 73#include <netinet/udp.h> 74 75#include <dev/pci/pcivar.h> 76#include <dev/pci/pcireg.h> 77#include <dev/em/if_em_hw.h> 78#include <dev/em/if_em.h> 79 80/********************************************************************* 81 * Set this to one to display debug statistics 82 *********************************************************************/ 83int em_display_debug_stats = 0; 84 85/********************************************************************* 86 * Driver version 87 *********************************************************************/ 88 89char em_driver_version[] = "Version - 6.1.4"; 90 91 92/********************************************************************* 93 * PCI Device ID Table 94 * 95 * Used by probe to select devices to load on 96 * Last field stores an index into em_strings 97 * Last entry must be all 0s 98 * 99 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 100 
 *********************************************************************/

/*
 * PCI ID match table consumed by em_probe().  Every entry uses
 * PCI_ANY_ID for the subvendor/subdevice so any board built around a
 * supported MAC matches.  The final string index (last field) selects
 * the branding string from em_strings[] below.
 */
static em_vendor_info_t em_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82571EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82572EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82572EI,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82573E,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573E_IAMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82573L,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IGP_C,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_ICH8_IFE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* required last entry */
	{ 0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);
static void	em_start(struct ifnet *);
static void	em_start_locked(struct ifnet *ifp);
static int	em_ioctl(struct ifnet *, u_long, caddr_t);
static void	em_watchdog(struct ifnet *);
static void	em_init(void *);
static void	em_init_locked(struct adapter *);
static void	em_stop(void *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
static void	em_identify_hardware(struct adapter *);
static int	em_allocate_pci_resources(struct adapter *);
static int	em_allocate_intr(struct adapter *);
static void	em_free_intr(struct adapter *);
static void	em_free_pci_resources(struct adapter *);
static void	em_local_timer(void *);
static int	em_hardware_init(struct adapter *);
static void	em_setup_interface(device_t, struct adapter *);
static int	em_setup_transmit_structures(struct adapter *);
static void	em_initialize_transmit_unit(struct adapter *);
static int	em_setup_receive_structures(struct adapter *);
static void	em_initialize_receive_unit(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);
static void	em_free_transmit_structures(struct adapter *);
static void	em_free_receive_structures(struct adapter *);
static void	em_update_stats_counters(struct adapter *);
static void	em_txeof(struct adapter *);
static int	em_allocate_receive_structures(struct adapter *);
static int	em_allocate_transmit_structures(struct adapter *);
static int	em_rxeof(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int	em_fixup_rx(struct adapter *);
#endif
static void	em_receive_checksum(struct adapter *, struct em_rx_desc *,
		    struct mbuf *);
static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_print_hw_stats(struct adapter *);
static void	em_update_link_status(struct adapter *);
static int	em_get_buf(struct adapter *, int);
static void	em_enable_vlans(struct adapter *);
static void	em_disable_vlans(struct adapter *);
static int	em_encap(struct adapter *, struct mbuf **);
static void	em_smartspeed(struct adapter *);
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *arg);
static void	em_82547_move_tail_locked(struct adapter *);
static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_print_debug_info(struct adapter *);
static int	em_is_valid_ether_addr(uint8_t *);
static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static uint32_t	em_fill_descriptors (bus_addr_t address, uint32_t length,
		    PDESC_ARRAY desc_array);
static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	em_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);

/*
 * Fast interrupt handler and legacy ithread/polling modes are
 * mutually exclusive.
 */
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
static void	em_intr(void *);
#else
static void	em_intr_fast(void *);
static void	em_add_int_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static void	em_handle_rxtx(void *context, int pending);
static void	em_handle_link(void *context, int pending);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, em_probe),
	DEVMETHOD(device_attach, em_attach),
	DEVMETHOD(device_detach, em_detach),
	DEVMETHOD(device_shutdown, em_shutdown),
	DEVMETHOD(device_suspend, em_suspend),
	DEVMETHOD(device_resume, em_resume),
	{0, 0}
};

static driver_t em_driver = {
	"em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/*
 * The interrupt-delay registers count in 1.024us ticks; these macros
 * convert between ticks and microseconds, rounding to nearest.
 */
#define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;

TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
#ifndef DEVICE_POLLING
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif

/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
333 * 334 * return BUS_PROBE_DEFAULT on success, positive on failure 335 *********************************************************************/ 336 337static int 338em_probe(device_t dev) 339{ 340 char adapter_name[60]; 341 uint16_t pci_vendor_id = 0; 342 uint16_t pci_device_id = 0; 343 uint16_t pci_subvendor_id = 0; 344 uint16_t pci_subdevice_id = 0; 345 em_vendor_info_t *ent; 346 347 INIT_DEBUGOUT("em_probe: begin"); 348 349 pci_vendor_id = pci_get_vendor(dev); 350 if (pci_vendor_id != EM_VENDOR_ID) 351 return (ENXIO); 352 353 pci_device_id = pci_get_device(dev); 354 pci_subvendor_id = pci_get_subvendor(dev); 355 pci_subdevice_id = pci_get_subdevice(dev); 356 357 ent = em_vendor_info_array; 358 while (ent->vendor_id != 0) { 359 if ((pci_vendor_id == ent->vendor_id) && 360 (pci_device_id == ent->device_id) && 361 362 ((pci_subvendor_id == ent->subvendor_id) || 363 (ent->subvendor_id == PCI_ANY_ID)) && 364 365 ((pci_subdevice_id == ent->subdevice_id) || 366 (ent->subdevice_id == PCI_ANY_ID))) { 367 sprintf(adapter_name, "%s %s", 368 em_strings[ent->index], 369 em_driver_version); 370 device_set_desc_copy(dev, adapter_name); 371 return (BUS_PROBE_DEFAULT); 372 } 373 ent++; 374 } 375 376 return (ENXIO); 377} 378 379/********************************************************************* 380 * Device initialization routine 381 * 382 * The attach entry point is called when the driver is being loaded. 383 * This routine identifies the type of hardware, allocates all resources 384 * and initializes the hardware. 
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_attach(device_t dev)
{
	struct adapter	*adapter;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	EM_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    em_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    em_sysctl_stats, "I", "Statistics");

	callout_init(&adapter->timer, CALLOUT_MPSAFE);
	callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	em_identify_hardware(adapter);

	/* Set up some sysctls for the tunable interrupt delays */
	em_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
	em_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
	/* Absolute delay limits (RADV/TADV) exist only on 82540 and later. */
	if (adapter->hw.mac_type >= em_82540) {
		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, RADV),
		    em_rx_abs_int_delay_dflt);
		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REG_OFFSET(&adapter->hw, TADV),
		    em_tx_abs_int_delay_dflt);
	}

#ifndef DEVICE_POLLING
	/* Sysctls for limiting the amount of work done in the taskqueue */
	em_add_int_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    em_rx_process_limit);
#endif

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of EM_DBA_ALIGN.  Invalid tunables fall back to the default
	 * count rather than failing attach.
	 */
	if (((em_txd * sizeof(struct em_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
	    (em_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = em_txd;
	if (((em_rxd * sizeof(struct em_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    (em_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = em_rxd;

	/* Default link parameters before hardware init. */
	adapter->hw.autoneg = DO_AUTO_NEG;
	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->hw.tbi_compatibility_en = TRUE;
	adapter->rx_buffer_len = EM_RXBUFFER_2048;

	adapter->hw.phy_init_script = 1;
	adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	adapter->hw.master_slave = em_ms_hw_default;
#else
	adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames.
	 */
	adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	adapter->hw.min_frame_size = MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.report_tx_early = 1;
	if (em_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Initialize eeprom parameters */
	em_init_eeprom_params(&adapter->hw);

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base = (struct em_rx_desc *)adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Setup OS specific network interface */
	em_setup_interface(dev, adapter);

	em_allocate_intr(adapter);

	/* Initialize statistics */
	em_clear_hw_cntrs(&adapter->hw);
	em_update_stats_counters(adapter);
	adapter->hw.get_link_status = 1;
	em_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (em_check_phy_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Identify 82544 on PCIX */
	em_get_bus_info(&adapter->hw);
	if(adapter->hw.bus_type == em_bus_type_pcix && adapter->hw.mac_type == em_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	INIT_DEBUGOUT("em_attach: end");

	return (0);

	/*
	 * Error unwind: each label frees what was allocated before the
	 * failure point.  NOTE(review): em_free_intr() is reached on the
	 * err_pci path even though em_allocate_intr() runs much later —
	 * presumably it tolerates never-allocated state; verify.
	 */
err_hw_init:
	em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	em_free_intr(adapter);
	em_free_pci_resources(adapter);
	EM_LOCK_DESTROY(adapter);

	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
	/* Leave polling mode before tearing the interface down. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Interrupt delivery is torn down first so no handler can run
	 * while the adapter is being stopped under the lock.
	 */
	em_free_intr(adapter);
	EM_LOCK(adapter);
	adapter->in_detach = 1;	/* makes em_ioctl() a no-op from here on */
	em_stop(adapter);
	em_phy_hw_reset(&adapter->hw);
	EM_UNLOCK(adapter);
	ether_ifdetach(adapter->ifp);

	em_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		em_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		em_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	EM_LOCK_DESTROY(adapter);

	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
em_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	EM_LOCK(adapter);
	em_stop(adapter);
	EM_UNLOCK(adapter);
	return (0);
}

/*
 * Suspend/resume device methods.
 */
static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_LOCK(adapter);
	em_stop(adapter);
	EM_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}

static int
em_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	EM_LOCK(adapter);
	em_init_locked(adapter);
	/* Restart transmission if the interface was up and running. */
	if ((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))
		em_start_locked(ifp);
	EM_UNLOCK(adapter);

	return bus_generic_resume(dev);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
em_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_LOCK_ASSERT(adapter);

	/* Bail unless running and not already marked busy. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * em_encap() can modify our pointer, and or make it NULL on
		 * failure. In that event, we can't requeue.
		 */
		if (em_encap(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EM_TX_TIMEOUT;
	}
}

/* Unlocked wrapper: take the adapter lock, then defer to em_start_locked(). */
static void
em_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	EM_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_start_locked(ifp);
	EM_UNLOCK(adapter);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
	struct ifaddr	*ifa = (struct ifaddr *)data;
	int		error = 0;

	/* Ignore everything once detach has begun. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				EM_LOCK(adapter);
				em_init_locked(adapter);
				EM_UNLOCK(adapter);
			}
			arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;
		uint16_t eeprom_data = 0;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_LOCK(adapter);
		/* Per-MAC ceiling on the frame size the hardware accepts. */
		switch (adapter->hw.mac_type) {
		case em_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			em_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames - fall thru */
		case em_82571:
		case em_82572:
		case em_80003es2lan:	/* Limit Jumbo Frame size */
			max_frame_size = 9234;
			break;
		case em_ich8lan:
			/* ICH8 does not support jumbo frames */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->hw.max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		em_init_locked(adapter);
		EM_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		EM_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* Already running: only refresh promisc mode. */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    IFF_PROMISC) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else
				em_init_locked(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				em_stop(adapter);
			}
		}
		adapter->if_flags = ifp->if_flags;
		EM_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_LOCK(adapter);
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac_type == em_82542_rev2_0) {
				em_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			/* Keep interrupts off while polling is active. */
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				em_enable_intr(adapter);
			EM_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(em_poll, ifp);
				if (error)
					return (error);
				EM_LOCK(adapter);
				em_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_LOCK(adapter);
				em_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		/* Capability changes require a full re-init when running. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			em_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	EM_LOCK(adapter);
	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
942 */ 943 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) { 944 ifp->if_timer = EM_TX_TIMEOUT; 945 EM_UNLOCK(adapter); 946 return; 947 } 948 949 if (em_check_for_link(&adapter->hw) == 0) 950 device_printf(adapter->dev, "watchdog timeout -- resetting\n"); 951 952 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 953 adapter->watchdog_events++; 954 955 em_init_locked(adapter); 956 EM_UNLOCK(adapter); 957} 958 959/********************************************************************* 960 * Init entry point 961 * 962 * This routine is used in two ways. It is used by the stack as 963 * init entry point in network interface structure. It is also used 964 * by the driver as a hw/sw initialization routine to get to a 965 * consistent state. 966 * 967 * return 0 on success, positive on failure 968 **********************************************************************/ 969 970static void 971em_init_locked(struct adapter *adapter) 972{ 973 struct ifnet *ifp = adapter->ifp; 974 device_t dev = adapter->dev; 975 uint32_t pba; 976 977 INIT_DEBUGOUT("em_init: begin"); 978 979 EM_LOCK_ASSERT(adapter); 980 981 em_stop(adapter); 982 983 /* 984 * Packet Buffer Allocation (PBA) 985 * Writing PBA sets the receive portion of the buffer 986 * the remainder is used for the transmit buffer. 987 * 988 * Devices before the 82547 had a Packet Buffer of 64K. 989 * Default allocation: PBA=48K for Rx, leaving 16K for Tx. 990 * After the 82547 the buffer was reduced to 40K. 991 * Default allocation: PBA=30K for Rx, leaving 10K for Tx. 992 * Note: default does not leave enough room for Jumbo Frame >10k. 
993 */ 994 switch (adapter->hw.mac_type) { 995 case em_82547: 996 case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */ 997 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192) 998 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 999 else 1000 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1001 adapter->tx_fifo_head = 0; 1002 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT; 1003 adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT; 1004 break; 1005 case em_80003es2lan: /* 80003es2lan: Total Packet Buffer is 48K */ 1006 case em_82571: /* 82571: Total Packet Buffer is 48K */ 1007 case em_82572: /* 82572: Total Packet Buffer is 48K */ 1008 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 1009 break; 1010 case em_82573: /* 82573: Total Packet Buffer is 32K */ 1011 /* Jumbo frames not supported */ 1012 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 1013 break; 1014 case em_ich8lan: 1015 pba = E1000_PBA_8K; 1016 break; 1017 default: 1018 /* Devices before 82547 had a Packet Buffer of 64K. 
*/ 1019 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) 1020 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1021 else 1022 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1023 } 1024 1025 INIT_DEBUGOUT1("em_init: pba=%dK",pba); 1026 E1000_WRITE_REG(&adapter->hw, PBA, pba); 1027 1028 /* Get the latest mac address, User can use a LAA */ 1029 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr, ETHER_ADDR_LEN); 1030 1031 /* Initialize the hardware */ 1032 if (em_hardware_init(adapter)) { 1033 device_printf(dev, "Unable to initialize the hardware\n"); 1034 return; 1035 } 1036 em_update_link_status(adapter); 1037 1038 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1039 em_enable_vlans(adapter); 1040 1041 /* Prepare transmit descriptors and buffers */ 1042 if (em_setup_transmit_structures(adapter)) { 1043 device_printf(dev, "Could not setup transmit structures\n"); 1044 em_stop(adapter); 1045 return; 1046 } 1047 em_initialize_transmit_unit(adapter); 1048 1049 /* Setup Multicast table */ 1050 em_set_multi(adapter); 1051 1052 /* Prepare receive descriptors and buffers */ 1053 if (em_setup_receive_structures(adapter)) { 1054 device_printf(dev, "Could not setup receive structures\n"); 1055 em_stop(adapter); 1056 return; 1057 } 1058 em_initialize_receive_unit(adapter); 1059 1060 /* Don't lose promiscuous settings */ 1061 em_set_promisc(adapter); 1062 1063 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1064 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1065 1066 if (adapter->hw.mac_type >= em_82543) { 1067 if (ifp->if_capenable & IFCAP_TXCSUM) 1068 ifp->if_hwassist = EM_CHECKSUM_FEATURES; 1069 else 1070 ifp->if_hwassist = 0; 1071 } 1072 1073 callout_reset(&adapter->timer, hz, em_local_timer, adapter); 1074 em_clear_hw_cntrs(&adapter->hw); 1075#ifdef DEVICE_POLLING 1076 /* 1077 * Only enable interrupts if we are not polling, make sure 1078 * they are off otherwise. 
1079 */ 1080 if (ifp->if_capenable & IFCAP_POLLING) 1081 em_disable_intr(adapter); 1082 else 1083#endif /* DEVICE_POLLING */ 1084 em_enable_intr(adapter); 1085 1086 /* Don't reset the phy next time init gets called */ 1087 adapter->hw.phy_reset_disable = TRUE; 1088} 1089 1090static void 1091em_init(void *arg) 1092{ 1093 struct adapter *adapter = arg; 1094 1095 EM_LOCK(adapter); 1096 em_init_locked(adapter); 1097 EM_UNLOCK(adapter); 1098} 1099 1100 1101#ifdef DEVICE_POLLING 1102/********************************************************************* 1103 * 1104 * Legacy polling routine 1105 * 1106 *********************************************************************/ 1107static void 1108em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1109{ 1110 struct adapter *adapter = ifp->if_softc; 1111 uint32_t reg_icr; 1112 1113 EM_LOCK(adapter); 1114 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1115 EM_UNLOCK(adapter); 1116 return; 1117 } 1118 1119 if (cmd == POLL_AND_CHECK_STATUS) { 1120 reg_icr = E1000_READ_REG(&adapter->hw, ICR); 1121 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1122 callout_stop(&adapter->timer); 1123 adapter->hw.get_link_status = 1; 1124 em_check_for_link(&adapter->hw); 1125 em_update_link_status(adapter); 1126 callout_reset(&adapter->timer, hz, em_local_timer, adapter); 1127 } 1128 } 1129 em_rxeof(adapter, count); 1130 em_txeof(adapter); 1131 1132 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1133 em_start_locked(ifp); 1134 EM_UNLOCK(adapter); 1135} 1136 1137/********************************************************************* 1138 * 1139 * Legacy Interrupt Service routine 1140 * 1141 *********************************************************************/ 1142static void 1143em_intr(void *arg) 1144{ 1145 struct adapter *adapter = arg; 1146 struct ifnet *ifp; 1147 uint32_t reg_icr; 1148 1149 EM_LOCK(adapter); 1150 1151 ifp = adapter->ifp; 1152 1153 if (ifp->if_capenable & IFCAP_POLLING) { 1154 EM_UNLOCK(adapter); 1155 return; 1156 } 1157 1158 for 
(;;) { 1159 reg_icr = E1000_READ_REG(&adapter->hw, ICR); 1160 if (adapter->hw.mac_type >= em_82571 && 1161 (reg_icr & E1000_ICR_INT_ASSERTED) == 0) 1162 break; 1163 else if (reg_icr == 0) 1164 break; 1165 1166 /* 1167 * XXX: some laptops trigger several spurious interrupts 1168 * on em(4) when in the resume cycle. The ICR register 1169 * reports all-ones value in this case. Processing such 1170 * interrupts would lead to a freeze. I don't know why. 1171 */ 1172 if (reg_icr == 0xffffffff) 1173 break; 1174 1175 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1176 em_rxeof(adapter, -1); 1177 em_txeof(adapter); 1178 } 1179 1180 /* Link status change */ 1181 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1182 callout_stop(&adapter->timer); 1183 adapter->hw.get_link_status = 1; 1184 em_check_for_link(&adapter->hw); 1185 em_update_link_status(adapter); 1186 callout_reset(&adapter->timer, hz, em_local_timer, adapter); 1187 } 1188 1189 if (reg_icr & E1000_ICR_RXO) 1190 adapter->rx_overruns++; 1191 } 1192 1193 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1194 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1195 em_start_locked(ifp); 1196 1197 EM_UNLOCK(adapter); 1198} 1199 1200#else /* if not DEVICE_POLLING, then fast interrupt routines only */ 1201 1202static void 1203em_handle_link(void *context, int pending) 1204{ 1205 struct adapter *adapter = context; 1206 struct ifnet *ifp; 1207 1208 ifp = adapter->ifp; 1209 1210 EM_LOCK(adapter); 1211 1212 callout_stop(&adapter->timer); 1213 adapter->hw.get_link_status = 1; 1214 em_check_for_link(&adapter->hw); 1215 em_update_link_status(adapter); 1216 callout_reset(&adapter->timer, hz, em_local_timer, adapter); 1217 EM_UNLOCK(adapter); 1218} 1219 1220static void 1221em_handle_rxtx(void *context, int pending) 1222{ 1223 struct adapter *adapter = context; 1224 struct ifnet *ifp; 1225 1226 NET_LOCK_GIANT(); 1227 ifp = adapter->ifp; 1228 1229 /* 1230 * TODO: 1231 * It should be possible to run the tx clean loop without the lock. 
1232 */ 1233 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1234 if (em_rxeof(adapter, adapter->rx_process_limit) != 0) 1235 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); 1236 EM_LOCK(adapter); 1237 em_txeof(adapter); 1238 1239 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1240 em_start_locked(ifp); 1241 EM_UNLOCK(adapter); 1242 } 1243 1244 em_enable_intr(adapter); 1245 NET_UNLOCK_GIANT(); 1246} 1247 1248/********************************************************************* 1249 * 1250 * Fast Interrupt Service routine 1251 * 1252 *********************************************************************/ 1253static void 1254em_intr_fast(void *arg) 1255{ 1256 struct adapter *adapter = arg; 1257 struct ifnet *ifp; 1258 uint32_t reg_icr; 1259 1260 ifp = adapter->ifp; 1261 1262 reg_icr = E1000_READ_REG(&adapter->hw, ICR); 1263 1264 /* Hot eject? */ 1265 if (reg_icr == 0xffffffff) 1266 return; 1267 1268 /* Definitely not our interrupt. */ 1269 if (reg_icr == 0x0) 1270 return; 1271 1272 /* 1273 * Starting with the 82571 chip, bit 31 should be used to 1274 * determine whether the interrupt belongs to us. 1275 */ 1276 if (adapter->hw.mac_type >= em_82571 && 1277 (reg_icr & E1000_ICR_INT_ASSERTED) == 0) 1278 return; 1279 1280 /* 1281 * Mask interrupts until the taskqueue is finished running. This is 1282 * cheap, just assume that it is needed. This also works around the 1283 * MSI message reordering errata on certain systems. 1284 */ 1285 em_disable_intr(adapter); 1286 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); 1287 1288 /* Link status change */ 1289 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) 1290 taskqueue_enqueue(taskqueue_fast, &adapter->link_task); 1291 1292 if (reg_icr & E1000_ICR_RXO) 1293 adapter->rx_overruns++; 1294} 1295#endif /* ! 
DEVICE_POLLING */

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("em_media_status: begin");

	/* Refresh link state before reporting. */
	em_check_for_link(&adapter->hw);
	em_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if ((adapter->hw.media_type == em_media_type_fiber) ||
	    (adapter->hw.media_type == em_media_type_internal_serdes)) {
		/* Fiber/SERDES links are always gigabit full duplex. */
		if (adapter->hw.mac_type == em_82545)
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
		else
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediopt option with ifconfig.
 *
 **********************************************************************/
static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit requires autonegotiation per 802.3. */
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_100_full;
		else
			adapter->hw.forced_speed_duplex = em_100_half;
		break;
	case IFM_10_T:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_10_full;
		else
			adapter->hw.forced_speed_duplex = em_10_half;
		break;
	default:
		/*
		 * NOTE(review): unsupported media falls through and still
		 * reinitializes with the previous settings, returning 0 --
		 * confirm whether an EINVAL return was intended here.
		 */
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	/* As the speed/duplex settings my have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy_reset_disable = FALSE;

	em_init(adapter);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	struct ifnet		*ifp = adapter->ifp;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_last;
	struct em_tx_desc	*current_tx_desc;
	struct mbuf		*m_head;
	struct m_tag		*mtag;
	uint32_t		txd_upper, txd_lower, txd_used, txd_saved;
	int			nsegs, i, j;
	int			error;

	m_head = *m_headp;
	current_tx_desc = NULL;
	txd_used = txd_saved = 0;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold.
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(adapter);
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/* Find out if we are in vlan mode. */
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled. This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		/* Save the original Ethernet header before prepending. */
		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			return (ENOBUFS);
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			return (ENOBUFS);
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			return (ENOBUFS);
		}
		/* Rebuild the header as an 802.1Q encapsulated frame. */
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

	/*
	 * Map the packet for DMA.
	 */
	tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
	tx_buffer_last = tx_buffer;
	map = tx_buffer->map;
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: defragment into fewer mbufs and retry. */
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			/* Assume m_defrag(9) used only m_get(9). */
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return (EIO);
	}

	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
	}

	m_head = *m_headp;
	if (ifp->if_hwassist > 0)
		em_transmit_checksum_setup(adapter, m_head, &txd_upper, &txd_lower);
	else
		txd_upper = txd_lower = 0;

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember the start so the ring can be rolled back on failure. */
		txd_saved = i;
		txd_used = 0;
	}
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus. */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			uint32_t	array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of room: undo and bail. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower |
				    (uint16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
			    adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (mtag != NULL) {
		/* Set the vlan id. */
		current_tx_desc->upper.fields.special =
		    htole16(VLAN_TAG_VALUE(mtag));

		/* Tell hardware to add tag. */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/*
	 * The mbuf is tracked on the LAST descriptor; swap the dmamaps so
	 * the loaded map follows the mbuf to its final buffer slot.
	 */
	tx_buffer->m_head = m_head;
	tx_buffer_last->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP).
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac_type == em_82547 && adapter->link_duplex == HALF_DUPLEX)
		em_82547_move_tail_locked(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547)
			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
	}

	return (0);
}

/*********************************************************************
 *
 *  82547 workaround to avoid controller hang in half-duplex environment.
 *  The workaround is to avoid queuing a large packet that would span
 *  the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 *  in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	/* Walk descriptors between hardware tail and software tail. */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/* FIFO busy; retry on the next tick. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}

/* Callout wrapper: acquires the lock and retries the tail move. */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;

	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}

/*
 * Returns 1 when the packet must be held back (FIFO reset still pending),
 * 0 when it is safe to queue.
 */
static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(adapter))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}

/* Account the queued packet in the software Tx FIFO head, with wraparound. */
static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	}
}


/*
 * Reset the internal Tx FIFO pointers, but only when the transmitter is
 * completely idle (ring and FIFO drained). Returns TRUE on reset.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&adapter->hw, TDT) == E1000_READ_REG(&adapter->hw, TDH)) &&
	    (E1000_READ_REG(&adapter->hw, TDFT) == E1000_READ_REG(&adapter->hw, TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, TDFTS) == E1000_READ_REG(&adapter->hw, TDFHS))&&
	    (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}

/* Program RCTL promiscuous/allmulti bits to match interface flags. */
static void
em_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		/* Disable VLAN stripping in promiscous mode
		 * This enables bridging of vlan tagged frames to occur
		 * and also allows vlan tags to be seen in tcpdump
		 */
		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			em_disable_vlans(adapter);
		adapter->em_insert_vlan_header = 1;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		adapter->em_insert_vlan_header = 0;
	} else
		adapter->em_insert_vlan_header = 0;
}

/* Clear promiscuous/allmulti bits and restore VLAN stripping if enabled. */
static void
em_disable_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	reg_rctl &= (~E1000_RCTL_UPE);
	reg_rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);
	adapter->em_insert_vlan_header = 0;
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/

static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	int mcnt = 0;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/* 82542 rev2.0 needs MWI off and the receiver in reset while the
	 * multicast table is rewritten.
	 */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Gather link-level multicast addresses, up to the hardware limit. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Too many groups for the filter table: accept all multicast. */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 rev2.0 receiver back out of reset. */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&adapter->hw);
	}
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
em_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_LOCK(adapter);

	em_check_for_link(&adapter->hw);
	em_update_link_status(adapter);
	em_update_stats_counters(adapter);
	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_print_hw_stats(adapter);
	em_smartspeed(adapter);

	/* Re-arm ourselves for the next second. */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);

	EM_UNLOCK(adapter);
}

/*
 * Propagate hardware link state (STATUS.LU) to the ifnet layer;
 * only acts on transitions of adapter->link_active.
 */
static void
em_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
			    &adapter->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if ((adapter->link_speed == SPEED_1000) &&
			    ((adapter->hw.mac_type == em_82571) ||
			    (adapter->hw.mac_type == em_82572))) {
				int tarc0;

				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
			}
			if (bootverbose)
				device_printf(dev, "Link is up %d Mbps %s\n",
				    adapter->link_speed,
				    ((adapter->link_duplex == FULL_DUPLEX) ?
				    "Full Duplex" : "Half Duplex"));
			adapter->link_active = 1;
			adapter->smartspeed = 0;
			ifp->if_baudrate = adapter->link_speed * 1000000;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		if (adapter->link_active == 1) {
			ifp->if_baudrate = adapter->link_speed = 0;
			adapter->link_duplex = 0;
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			adapter->link_active = 0;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 *  Caller must hold the adapter lock.
 *
 **********************************************************************/

static void
em_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("em_stop: begin");

	em_disable_intr(adapter);
	em_reset_hw(&adapter->hw);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}


/*********************************************************************
 *
 *  Determine hardware revision.
1955 * 1956 **********************************************************************/ 1957static void 1958em_identify_hardware(struct adapter *adapter) 1959{ 1960 device_t dev = adapter->dev; 1961 1962 /* Make sure our PCI config space has the necessary stuff set */ 1963 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 1964 if ((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) == 0 && 1965 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN)) { 1966 device_printf(dev, "Memory Access and/or Bus Master bits " 1967 "were not set!\n"); 1968 adapter->hw.pci_cmd_word |= 1969 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); 1970 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2); 1971 } 1972 1973 /* Save off the information about this board */ 1974 adapter->hw.vendor_id = pci_get_vendor(dev); 1975 adapter->hw.device_id = pci_get_device(dev); 1976 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 1977 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); 1978 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); 1979 1980 /* Identify the MAC */ 1981 if (em_set_mac_type(&adapter->hw)) 1982 device_printf(dev, "Unknown MAC Type\n"); 1983 1984 if(adapter->hw.mac_type == em_82541 || adapter->hw.mac_type == em_82541_rev_2 || 1985 adapter->hw.mac_type == em_82547 || adapter->hw.mac_type == em_82547_rev_2) 1986 adapter->hw.phy_init_script = TRUE; 1987} 1988 1989static int 1990em_allocate_pci_resources(struct adapter *adapter) 1991{ 1992 device_t dev = adapter->dev; 1993 int val, rid; 1994 1995 rid = PCIR_BAR(0); 1996 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1997 &rid, RF_ACTIVE); 1998 if (adapter->res_memory == NULL) { 1999 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2000 return (ENXIO); 2001 } 2002 adapter->osdep.mem_bus_space_tag = 2003 rman_get_bustag(adapter->res_memory); 2004 adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->res_memory); 2005 
adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle; 2006 2007 if (adapter->hw.mac_type > em_82543) { 2008 /* Figure our where our IO BAR is ? */ 2009 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) { 2010 val = pci_read_config(dev, rid, 4); 2011 if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) { 2012 adapter->io_rid = rid; 2013 break; 2014 } 2015 rid += 4; 2016 /* check for 64bit BAR */ 2017 if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT) 2018 rid += 4; 2019 } 2020 if (rid >= PCIR_CIS) { 2021 device_printf(dev, "Unable to locate IO BAR\n"); 2022 return (ENXIO); 2023 } 2024 adapter->res_ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, 2025 &adapter->io_rid, RF_ACTIVE); 2026 if (adapter->res_ioport == NULL) { 2027 device_printf(dev, "Unable to allocate bus resource: " 2028 "ioport\n"); 2029 return (ENXIO); 2030 } 2031 adapter->hw.io_base = 0; 2032 adapter->osdep.io_bus_space_tag = rman_get_bustag(adapter->res_ioport); 2033 adapter->osdep.io_bus_space_handle = 2034 rman_get_bushandle(adapter->res_ioport); 2035 } 2036 2037 /* For ICH8 we need to find the flash memory. 
*/ 2038 if (adapter->hw.mac_type == em_ich8lan) { 2039 rid = EM_FLASH; 2040 2041 adapter->flash_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2042 &rid, RF_ACTIVE); 2043 adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash_mem); 2044 adapter->osdep.flash_bus_space_handle = 2045 rman_get_bushandle(adapter->flash_mem); 2046 } 2047 2048 rid = 0x0; 2049 adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2050 RF_SHAREABLE | RF_ACTIVE); 2051 if (adapter->res_interrupt == NULL) { 2052 device_printf(dev, "Unable to allocate bus resource: " 2053 "interrupt\n"); 2054 return (ENXIO); 2055 } 2056 2057 adapter->hw.back = &adapter->osdep; 2058 2059 return (0); 2060} 2061 2062int 2063em_allocate_intr(struct adapter *adapter) 2064{ 2065 device_t dev = adapter->dev; 2066 int error; 2067 2068 /* Manually turn off all interrupts */ 2069 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff); 2070 2071#ifdef DEVICE_POLLING 2072 if (adapter->int_handler_tag == NULL && (error = bus_setup_intr(dev, 2073 adapter->res_interrupt, INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter, 2074 &adapter->int_handler_tag)) != 0) { 2075 device_printf(dev, "Failed to register interrupt handler"); 2076 return (error); 2077 } 2078#else 2079 /* 2080 * Try allocating a fast interrupt and the associated deferred 2081 * processing contexts. 
2082 */ 2083 TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter); 2084 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter); 2085 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT, 2086 taskqueue_thread_enqueue, &adapter->tq); 2087 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq", 2088 device_get_nameunit(adapter->dev)); 2089 if ((error = bus_setup_intr(dev, adapter->res_interrupt, 2090 INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter, 2091 &adapter->int_handler_tag)) != 0) { 2092 device_printf(dev, "Failed to register fast interrupt " 2093 "handler: %d\n", error); 2094 taskqueue_free(adapter->tq); 2095 adapter->tq = NULL; 2096 return (error); 2097 } 2098#endif 2099 2100 em_enable_intr(adapter); 2101 return (0); 2102} 2103 2104static void 2105em_free_intr(struct adapter *adapter) 2106{ 2107 device_t dev = adapter->dev; 2108 2109 if (adapter->res_interrupt != NULL) { 2110 bus_teardown_intr(dev, adapter->res_interrupt, adapter->int_handler_tag); 2111 adapter->int_handler_tag = NULL; 2112 } 2113 if (adapter->tq != NULL) { 2114 taskqueue_drain(adapter->tq, &adapter->rxtx_task); 2115 taskqueue_drain(taskqueue_fast, &adapter->link_task); 2116 taskqueue_free(adapter->tq); 2117 adapter->tq = NULL; 2118 } 2119} 2120 2121static void 2122em_free_pci_resources(struct adapter *adapter) 2123{ 2124 device_t dev = adapter->dev; 2125 2126 if (adapter->res_interrupt != NULL) 2127 bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->res_interrupt); 2128 2129 if (adapter->res_memory != NULL) 2130 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 2131 adapter->res_memory); 2132 2133 if (adapter->flash_mem != NULL) 2134 bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH, 2135 adapter->flash_mem); 2136 2137 if (adapter->res_ioport != NULL) 2138 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 2139 adapter->res_ioport); 2140} 2141 2142/********************************************************************* 2143 * 2144 * Initialize the hardware to 
a configuration as specified by the 2145 * adapter structure. The controller is reset, the EEPROM is 2146 * verified, the MAC address is set, then the shared initialization 2147 * routines are called. 2148 * 2149 **********************************************************************/ 2150static int 2151em_hardware_init(struct adapter *adapter) 2152{ 2153 device_t dev = adapter->dev; 2154 uint16_t rx_buffer_size; 2155 2156 INIT_DEBUGOUT("em_hardware_init: begin"); 2157 /* Issue a global reset */ 2158 em_reset_hw(&adapter->hw); 2159 2160 /* When hardware is reset, fifo_head is also reset */ 2161 adapter->tx_fifo_head = 0; 2162 2163 /* Make sure we have a good EEPROM before we read from it */ 2164 if (em_validate_eeprom_checksum(&adapter->hw) < 0) { 2165 device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); 2166 return (EIO); 2167 } 2168 2169 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) { 2170 device_printf(dev, "EEPROM read error while reading part " 2171 "number\n"); 2172 return (EIO); 2173 } 2174 2175 /* Set up smart power down as default off on newer adapters. */ 2176 if (!em_smart_pwr_down && 2177 (adapter->hw.mac_type == em_82571 || adapter->hw.mac_type == em_82572)) { 2178 uint16_t phy_tmp = 0; 2179 2180 /* Speed up time to link by disabling smart power down. */ 2181 em_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 2182 phy_tmp &= ~IGP02E1000_PM_SPD; 2183 em_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp); 2184 } 2185 2186 /* 2187 * These parameters control the automatic generation (Tx) and 2188 * response (Rx) to Ethernet PAUSE frames. 2189 * - High water mark should allow for at least two frames to be 2190 * received after sending an XOFF. 2191 * - Low water mark works best when it is very near the high water mark. 2192 * This allows the receiver to restart by sending XON when it has 2193 * drained a bit. 
Here we use an arbitary value of 1500 which will 2194 * restart after one full frame is pulled from the buffer. There 2195 * could be several smaller frames in the buffer and if so they will 2196 * not trigger the XON until their total number reduces the buffer 2197 * by 1500. 2198 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2199 */ 2200 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 ); 2201 2202 adapter->hw.fc_high_water = rx_buffer_size - 2203 roundup2(adapter->hw.max_frame_size, 1024); 2204 adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500; 2205 if (adapter->hw.mac_type == em_80003es2lan) 2206 adapter->hw.fc_pause_time = 0xFFFF; 2207 else 2208 adapter->hw.fc_pause_time = 0x1000; 2209 adapter->hw.fc_send_xon = TRUE; 2210 adapter->hw.fc = em_fc_full; 2211 2212 if (em_init_hw(&adapter->hw) < 0) { 2213 device_printf(dev, "Hardware Initialization Failed"); 2214 return (EIO); 2215 } 2216 2217 em_check_for_link(&adapter->hw); 2218 2219 return (0); 2220} 2221 2222/********************************************************************* 2223 * 2224 * Setup networking device structure and register an interface. 
2225 * 2226 **********************************************************************/ 2227static void 2228em_setup_interface(device_t dev, struct adapter *adapter) 2229{ 2230 struct ifnet *ifp; 2231 INIT_DEBUGOUT("em_setup_interface: begin"); 2232 2233 ifp = adapter->ifp = if_alloc(IFT_ETHER); 2234 if (ifp == NULL) 2235 panic("%s: can not if_alloc()", device_get_nameunit(dev)); 2236 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2237 ifp->if_mtu = ETHERMTU; 2238 ifp->if_init = em_init; 2239 ifp->if_softc = adapter; 2240 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2241 ifp->if_ioctl = em_ioctl; 2242 ifp->if_start = em_start; 2243 ifp->if_watchdog = em_watchdog; 2244 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); 2245 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; 2246 IFQ_SET_READY(&ifp->if_snd); 2247 2248 ether_ifattach(ifp, adapter->hw.mac_addr); 2249 2250 ifp->if_capabilities = ifp->if_capenable = 0; 2251 2252 if (adapter->hw.mac_type >= em_82543) { 2253 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; 2254 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; 2255 } 2256 2257 /* 2258 * Tell the upper layer(s) we support long frames. 
2259 */ 2260 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2261 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2262 ifp->if_capenable |= IFCAP_VLAN_MTU; 2263 2264#ifdef DEVICE_POLLING 2265 ifp->if_capabilities |= IFCAP_POLLING; 2266#endif 2267 2268 /* 2269 * Specify the media types supported by this adapter and register 2270 * callbacks to update media and link information 2271 */ 2272 ifmedia_init(&adapter->media, IFM_IMASK, em_media_change, 2273 em_media_status); 2274 if ((adapter->hw.media_type == em_media_type_fiber) || 2275 (adapter->hw.media_type == em_media_type_internal_serdes)) { 2276 u_char fiber_type = IFM_1000_SX; // default type; 2277 2278 if (adapter->hw.mac_type == em_82545) 2279 fiber_type = IFM_1000_LX; 2280 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2281 0, NULL); 2282 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); 2283 } else { 2284 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2285 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2286 0, NULL); 2287 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2288 0, NULL); 2289 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2290 0, NULL); 2291 if (adapter->hw.phy_type != em_phy_ife) { 2292 ifmedia_add(&adapter->media, 2293 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2294 ifmedia_add(&adapter->media, 2295 IFM_ETHER | IFM_1000_T, 0, NULL); 2296 } 2297 } 2298 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2299 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2300} 2301 2302 2303/********************************************************************* 2304 * 2305 * Workaround for SmartSpeed on 82541 and 82547 controllers 2306 * 2307 **********************************************************************/ 2308static void 2309em_smartspeed(struct adapter *adapter) 2310{ 2311 uint16_t phy_tmp; 2312 2313 if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) || 2314 
adapter->hw.autoneg == 0 || 2315 (adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2316 return; 2317 2318 if (adapter->smartspeed == 0) { 2319 /* If Master/Slave config fault is asserted twice, 2320 * we assume back-to-back */ 2321 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2322 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2323 return; 2324 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2325 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2326 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2327 if(phy_tmp & CR_1000T_MS_ENABLE) { 2328 phy_tmp &= ~CR_1000T_MS_ENABLE; 2329 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, 2330 phy_tmp); 2331 adapter->smartspeed++; 2332 if(adapter->hw.autoneg && 2333 !em_phy_setup_autoneg(&adapter->hw) && 2334 !em_read_phy_reg(&adapter->hw, PHY_CTRL, 2335 &phy_tmp)) { 2336 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2337 MII_CR_RESTART_AUTO_NEG); 2338 em_write_phy_reg(&adapter->hw, PHY_CTRL, 2339 phy_tmp); 2340 } 2341 } 2342 } 2343 return; 2344 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2345 /* If still no link, perhaps using 2/3 pair cable */ 2346 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2347 phy_tmp |= CR_1000T_MS_ENABLE; 2348 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2349 if(adapter->hw.autoneg && 2350 !em_phy_setup_autoneg(&adapter->hw) && 2351 !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) { 2352 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2353 MII_CR_RESTART_AUTO_NEG); 2354 em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp); 2355 } 2356 } 2357 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2358 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2359 adapter->smartspeed = 0; 2360} 2361 2362 2363/* 2364 * Manage DMA'able memory. 
2365 */ 2366static void 2367em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2368{ 2369 if (error) 2370 return; 2371 *(bus_addr_t *) arg = segs[0].ds_addr; 2372} 2373 2374static int 2375em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma, 2376 int mapflags) 2377{ 2378 int error; 2379 2380 error = bus_dma_tag_create(NULL, /* parent */ 2381 EM_DBA_ALIGN, 0, /* alignment, bounds */ 2382 BUS_SPACE_MAXADDR, /* lowaddr */ 2383 BUS_SPACE_MAXADDR, /* highaddr */ 2384 NULL, NULL, /* filter, filterarg */ 2385 size, /* maxsize */ 2386 1, /* nsegments */ 2387 size, /* maxsegsize */ 2388 0, /* flags */ 2389 NULL, /* lockfunc */ 2390 NULL, /* lockarg */ 2391 &dma->dma_tag); 2392 if (error) { 2393 device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", 2394 __func__, error); 2395 goto fail_0; 2396 } 2397 2398 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 2399 BUS_DMA_NOWAIT, &dma->dma_map); 2400 if (error) { 2401 device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", 2402 __func__, (uintmax_t)size, error); 2403 goto fail_2; 2404 } 2405 2406 dma->dma_paddr = 0; 2407 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 2408 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); 2409 if (error || dma->dma_paddr == 0) { 2410 device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", 2411 __func__, error); 2412 goto fail_3; 2413 } 2414 2415 return (0); 2416 2417fail_3: 2418 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2419fail_2: 2420 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2421 bus_dma_tag_destroy(dma->dma_tag); 2422fail_0: 2423 dma->dma_map = NULL; 2424 dma->dma_tag = NULL; 2425 2426 return (error); 2427} 2428 2429static void 2430em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2431{ 2432 if (dma->dma_tag == NULL) 2433 return; 2434 if (dma->dma_map != NULL) { 2435 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 2436 
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2437 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2438 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2439 dma->dma_map = NULL; 2440 } 2441 bus_dma_tag_destroy(dma->dma_tag); 2442 dma->dma_tag = NULL; 2443} 2444 2445 2446/********************************************************************* 2447 * 2448 * Allocate memory for tx_buffer structures. The tx_buffer stores all 2449 * the information needed to transmit a packet on the wire. 2450 * 2451 **********************************************************************/ 2452static int 2453em_allocate_transmit_structures(struct adapter *adapter) 2454{ 2455 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) * 2456 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT); 2457 if (adapter->tx_buffer_area == NULL) { 2458 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n"); 2459 return (ENOMEM); 2460 } 2461 2462 bzero(adapter->tx_buffer_area, sizeof(struct em_buffer) * adapter->num_tx_desc); 2463 2464 return (0); 2465} 2466 2467/********************************************************************* 2468 * 2469 * Allocate and initialize transmit structures. 2470 * 2471 **********************************************************************/ 2472static int 2473em_setup_transmit_structures(struct adapter *adapter) 2474{ 2475 device_t dev = adapter->dev; 2476 struct em_buffer *tx_buffer; 2477 bus_size_t size; 2478 int error, i; 2479 2480 /* 2481 * Setup DMA descriptor areas. 
2482 */ 2483 size = roundup2(adapter->hw.max_frame_size, MCLBYTES); 2484 if ((error = bus_dma_tag_create(NULL, /* parent */ 2485 1, 0, /* alignment, bounds */ 2486 BUS_SPACE_MAXADDR, /* lowaddr */ 2487 BUS_SPACE_MAXADDR, /* highaddr */ 2488 NULL, NULL, /* filter, filterarg */ 2489 size, /* maxsize */ 2490 EM_MAX_SCATTER, /* nsegments */ 2491 size, /* maxsegsize */ 2492 0, /* flags */ 2493 NULL, /* lockfunc */ 2494 NULL, /* lockarg */ 2495 &adapter->txtag)) != 0) { 2496 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2497 goto fail; 2498 } 2499 2500 if ((error = em_allocate_transmit_structures(adapter)) != 0) 2501 goto fail; 2502 2503 bzero(adapter->tx_desc_base, (sizeof(struct em_tx_desc)) * adapter->num_tx_desc); 2504 tx_buffer = adapter->tx_buffer_area; 2505 for (i = 0; i < adapter->num_tx_desc; i++) { 2506 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map); 2507 if (error != 0) { 2508 device_printf(dev, "Unable to create TX DMA map\n"); 2509 goto fail; 2510 } 2511 tx_buffer++; 2512 } 2513 2514 adapter->next_avail_tx_desc = 0; 2515 adapter->oldest_used_tx_desc = 0; 2516 2517 /* Set number of descriptors available */ 2518 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2519 2520 /* Set checksum context */ 2521 adapter->active_checksum_context = OFFLOAD_NONE; 2522 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2523 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2524 2525 return (0); 2526 2527fail: 2528 em_free_transmit_structures(adapter); 2529 return (error); 2530} 2531 2532/********************************************************************* 2533 * 2534 * Enable transmit unit. 
2535 * 2536 **********************************************************************/ 2537static void 2538em_initialize_transmit_unit(struct adapter *adapter) 2539{ 2540 uint32_t reg_tctl, reg_tarc; 2541 uint32_t reg_tipg = 0; 2542 uint64_t bus_addr; 2543 2544 INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); 2545 /* Setup the Base and Length of the Tx Descriptor Ring */ 2546 bus_addr = adapter->txdma.dma_paddr; 2547 E1000_WRITE_REG(&adapter->hw, TDLEN, 2548 adapter->num_tx_desc * sizeof(struct em_tx_desc)); 2549 E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32)); 2550 E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr); 2551 2552 /* Setup the HW Tx Head and Tail descriptor pointers */ 2553 E1000_WRITE_REG(&adapter->hw, TDT, 0); 2554 E1000_WRITE_REG(&adapter->hw, TDH, 0); 2555 2556 2557 HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, TDBAL), 2558 E1000_READ_REG(&adapter->hw, TDLEN)); 2559 2560 /* Set the default values for the Tx Inter Packet Gap timer */ 2561 switch (adapter->hw.mac_type) { 2562 case em_82542_rev2_0: 2563 case em_82542_rev2_1: 2564 reg_tipg = DEFAULT_82542_TIPG_IPGT; 2565 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2566 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2567 break; 2568 case em_80003es2lan: 2569 reg_tipg = DEFAULT_82543_TIPG_IPGR1; 2570 reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2571 E1000_TIPG_IPGR2_SHIFT; 2572 break; 2573 default: 2574 if ((adapter->hw.media_type == em_media_type_fiber) || 2575 (adapter->hw.media_type == em_media_type_internal_serdes)) 2576 reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2577 else 2578 reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2579 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2580 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2581 } 2582 2583 E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg); 2584 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value); 2585 if(adapter->hw.mac_type 
>= em_82540) 2586 E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay.value); 2587 2588 /* Do adapter specific tweaks before we enable the transmitter. */ 2589 if (adapter->hw.mac_type == em_82571 || adapter->hw.mac_type == em_82572) { 2590 reg_tarc = E1000_READ_REG(&adapter->hw, TARC0); 2591 reg_tarc |= (1 << 25); 2592 E1000_WRITE_REG(&adapter->hw, TARC0, reg_tarc); 2593 reg_tarc = E1000_READ_REG(&adapter->hw, TARC1); 2594 reg_tarc |= (1 << 25); 2595 reg_tarc &= ~(1 << 28); 2596 E1000_WRITE_REG(&adapter->hw, TARC1, reg_tarc); 2597 } else if (adapter->hw.mac_type == em_80003es2lan) { 2598 reg_tarc = E1000_READ_REG(&adapter->hw, TARC0); 2599 reg_tarc |= 1; 2600 E1000_WRITE_REG(&adapter->hw, TARC0, reg_tarc); 2601 reg_tarc = E1000_READ_REG(&adapter->hw, TARC1); 2602 reg_tarc |= 1; 2603 E1000_WRITE_REG(&adapter->hw, TARC1, reg_tarc); 2604 } 2605 2606 /* Program the Transmit Control Register */ 2607 reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN | 2608 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2609 if (adapter->hw.mac_type >= em_82571) 2610 reg_tctl |= E1000_TCTL_MULR; 2611 if (adapter->link_duplex == 1) { 2612 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT; 2613 } else { 2614 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT; 2615 } 2616 /* This write will effectively turn on the transmit unit. */ 2617 E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl); 2618 2619 /* Setup Transmit Descriptor Settings for this adapter */ 2620 adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS; 2621 2622 if (adapter->tx_int_delay.value > 0) 2623 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 2624} 2625 2626/********************************************************************* 2627 * 2628 * Free all transmit related data structures. 
2629 * 2630 **********************************************************************/ 2631static void 2632em_free_transmit_structures(struct adapter *adapter) 2633{ 2634 struct em_buffer *tx_buffer; 2635 int i; 2636 2637 INIT_DEBUGOUT("free_transmit_structures: begin"); 2638 2639 if (adapter->tx_buffer_area != NULL) { 2640 tx_buffer = adapter->tx_buffer_area; 2641 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { 2642 if (tx_buffer->m_head != NULL) { 2643 bus_dmamap_sync(adapter->txtag, tx_buffer->map, 2644 BUS_DMASYNC_POSTWRITE); 2645 bus_dmamap_unload(adapter->txtag, 2646 tx_buffer->map); 2647 m_freem(tx_buffer->m_head); 2648 tx_buffer->m_head = NULL; 2649 } else if (tx_buffer->map != NULL) 2650 bus_dmamap_unload(adapter->txtag, 2651 tx_buffer->map); 2652 if (tx_buffer->map != NULL) { 2653 bus_dmamap_destroy(adapter->txtag, 2654 tx_buffer->map); 2655 tx_buffer->map = NULL; 2656 } 2657 } 2658 } 2659 if (adapter->tx_buffer_area != NULL) { 2660 free(adapter->tx_buffer_area, M_DEVBUF); 2661 adapter->tx_buffer_area = NULL; 2662 } 2663 if (adapter->txtag != NULL) { 2664 bus_dma_tag_destroy(adapter->txtag); 2665 adapter->txtag = NULL; 2666 } 2667} 2668 2669/********************************************************************* 2670 * 2671 * The offload context needs to be set when we transfer the first 2672 * packet of a particular protocol (TCP/UDP). We change the 2673 * context only if the protocol type changes. 
2674 * 2675 **********************************************************************/ 2676static void 2677em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp, 2678 uint32_t *txd_upper, uint32_t *txd_lower) 2679{ 2680 struct em_context_desc *TXD; 2681 struct em_buffer *tx_buffer; 2682 int curr_txd; 2683 2684 if (mp->m_pkthdr.csum_flags) { 2685 2686 if (mp->m_pkthdr.csum_flags & CSUM_TCP) { 2687 *txd_upper = E1000_TXD_POPTS_TXSM << 8; 2688 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2689 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) 2690 return; 2691 else 2692 adapter->active_checksum_context = OFFLOAD_TCP_IP; 2693 2694 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { 2695 *txd_upper = E1000_TXD_POPTS_TXSM << 8; 2696 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2697 if (adapter->active_checksum_context == OFFLOAD_UDP_IP) 2698 return; 2699 else 2700 adapter->active_checksum_context = OFFLOAD_UDP_IP; 2701 } else { 2702 *txd_upper = 0; 2703 *txd_lower = 0; 2704 return; 2705 } 2706 } else { 2707 *txd_upper = 0; 2708 *txd_lower = 0; 2709 return; 2710 } 2711 2712 /* If we reach this point, the checksum offload context 2713 * needs to be reset. 
2714 */ 2715 curr_txd = adapter->next_avail_tx_desc; 2716 tx_buffer = &adapter->tx_buffer_area[curr_txd]; 2717 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd]; 2718 2719 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN; 2720 TXD->lower_setup.ip_fields.ipcso = 2721 ETHER_HDR_LEN + offsetof(struct ip, ip_sum); 2722 TXD->lower_setup.ip_fields.ipcse = 2723 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1); 2724 2725 TXD->upper_setup.tcp_fields.tucss = 2726 ETHER_HDR_LEN + sizeof(struct ip); 2727 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2728 2729 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { 2730 TXD->upper_setup.tcp_fields.tucso = 2731 ETHER_HDR_LEN + sizeof(struct ip) + 2732 offsetof(struct tcphdr, th_sum); 2733 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { 2734 TXD->upper_setup.tcp_fields.tucso = 2735 ETHER_HDR_LEN + sizeof(struct ip) + 2736 offsetof(struct udphdr, uh_sum); 2737 } 2738 2739 TXD->tcp_seg_setup.data = htole32(0); 2740 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT); 2741 2742 tx_buffer->m_head = NULL; 2743 2744 if (++curr_txd == adapter->num_tx_desc) 2745 curr_txd = 0; 2746 2747 adapter->num_tx_desc_avail--; 2748 adapter->next_avail_tx_desc = curr_txd; 2749} 2750 2751/********************************************************************** 2752 * 2753 * Examine each tx_buffer in the used queue. If the hardware is done 2754 * processing the packet then free associated resources. The 2755 * tx_buffer is put back on the free queue. 
2756 * 2757 **********************************************************************/ 2758static void 2759em_txeof(struct adapter *adapter) 2760{ 2761 int i, num_avail; 2762 struct em_buffer *tx_buffer; 2763 struct em_tx_desc *tx_desc; 2764 struct ifnet *ifp = adapter->ifp; 2765 2766 EM_LOCK_ASSERT(adapter); 2767 2768 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2769 return; 2770 2771 num_avail = adapter->num_tx_desc_avail; 2772 i = adapter->oldest_used_tx_desc; 2773 2774 tx_buffer = &adapter->tx_buffer_area[i]; 2775 tx_desc = &adapter->tx_desc_base[i]; 2776 2777 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2778 BUS_DMASYNC_POSTREAD); 2779 while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2780 2781 tx_desc->upper.data = 0; 2782 num_avail++; 2783 2784 if (tx_buffer->m_head) { 2785 ifp->if_opackets++; 2786 bus_dmamap_sync(adapter->txtag, tx_buffer->map, 2787 BUS_DMASYNC_POSTWRITE); 2788 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2789 2790 m_freem(tx_buffer->m_head); 2791 tx_buffer->m_head = NULL; 2792 } 2793 2794 if (++i == adapter->num_tx_desc) 2795 i = 0; 2796 2797 tx_buffer = &adapter->tx_buffer_area[i]; 2798 tx_desc = &adapter->tx_desc_base[i]; 2799 } 2800 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2801 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2802 2803 adapter->oldest_used_tx_desc = i; 2804 2805 /* 2806 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack 2807 * that it is OK to send packets. 2808 * If there are no pending descriptors, clear the timeout. Otherwise, 2809 * if some descriptors have been freed, restart the timeout. 
2810 */ 2811 if (num_avail > EM_TX_CLEANUP_THRESHOLD) { 2812 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2813 if (num_avail == adapter->num_tx_desc) 2814 ifp->if_timer = 0; 2815 else if (num_avail != adapter->num_tx_desc_avail) 2816 ifp->if_timer = EM_TX_TIMEOUT; 2817 } 2818 adapter->num_tx_desc_avail = num_avail; 2819} 2820 2821/********************************************************************* 2822 * 2823 * Get a buffer from system mbuf buffer pool. 2824 * 2825 **********************************************************************/ 2826static int 2827em_get_buf(struct adapter *adapter, int i) 2828{ 2829 struct mbuf *m; 2830 bus_dma_segment_t segs[1]; 2831 bus_dmamap_t map; 2832 struct em_buffer *rx_buffer; 2833 int error, nsegs; 2834 2835 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2836 if (m == NULL) { 2837 adapter->mbuf_cluster_failed++; 2838 return (ENOBUFS); 2839 } 2840 m->m_len = m->m_pkthdr.len = MCLBYTES; 2841 if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN)) 2842 m_adj(m, ETHER_ALIGN); 2843 2844 /* 2845 * Using memory from the mbuf cluster pool, invoke the 2846 * bus_dma machinery to arrange the memory mapping. 2847 */ 2848 error = bus_dmamap_load_mbuf_sg(adapter->rxtag, adapter->rx_sparemap, 2849 m, segs, &nsegs, BUS_DMA_NOWAIT); 2850 if (error != 0) { 2851 m_free(m); 2852 return (error); 2853 } 2854 /* If nsegs is wrong then the stack is corrupt. */ 2855 KASSERT(nsegs == 1, ("Too many segments returned!")); 2856 2857 rx_buffer = &adapter->rx_buffer_area[i]; 2858 if (rx_buffer->m_head != NULL) 2859 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 2860 2861 map = rx_buffer->map; 2862 rx_buffer->map = adapter->rx_sparemap; 2863 adapter->rx_sparemap = map; 2864 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD); 2865 rx_buffer->m_head = m; 2866 2867 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr); 2868 /* Zero out the receive descriptors status. 
*/ 2869 adapter->rx_desc_base[i].status = 0; 2870 2871 return (0); 2872} 2873 2874/********************************************************************* 2875 * 2876 * Allocate memory for rx_buffer structures. Since we use one 2877 * rx_buffer per received packet, the maximum number of rx_buffer's 2878 * that we'll need is equal to the number of receive descriptors 2879 * that we've allocated. 2880 * 2881 **********************************************************************/ 2882static int 2883em_allocate_receive_structures(struct adapter *adapter) 2884{ 2885 device_t dev = adapter->dev; 2886 struct em_buffer *rx_buffer; 2887 int i, error; 2888 2889 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) * adapter->num_rx_desc, 2890 M_DEVBUF, M_NOWAIT); 2891 if (adapter->rx_buffer_area == NULL) { 2892 device_printf(dev, "Unable to allocate rx_buffer memory\n"); 2893 return (ENOMEM); 2894 } 2895 2896 bzero(adapter->rx_buffer_area, sizeof(struct em_buffer) * adapter->num_rx_desc); 2897 2898 error = bus_dma_tag_create(NULL, /* parent */ 2899 1, 0, /* alignment, bounds */ 2900 BUS_SPACE_MAXADDR, /* lowaddr */ 2901 BUS_SPACE_MAXADDR, /* highaddr */ 2902 NULL, NULL, /* filter, filterarg */ 2903 MCLBYTES, /* maxsize */ 2904 1, /* nsegments */ 2905 MCLBYTES, /* maxsegsize */ 2906 0, /* flags */ 2907 NULL, /* lockfunc */ 2908 NULL, /* lockarg */ 2909 &adapter->rxtag); 2910 if (error) { 2911 device_printf(dev, "%s: bus_dma_tag_create failed %d\n", 2912 __func__, error); 2913 goto fail; 2914 } 2915 2916 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, 2917 &adapter->rx_sparemap); 2918 if (error) { 2919 device_printf(dev, "%s: bus_dmamap_create failed: %d\n", 2920 __func__, error); 2921 goto fail; 2922 } 2923 rx_buffer = adapter->rx_buffer_area; 2924 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 2925 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, 2926 &rx_buffer->map); 2927 if (error) { 2928 device_printf(dev, "%s: bus_dmamap_create failed: 
%d\n", 2929 __func__, error); 2930 goto fail; 2931 } 2932 } 2933 2934 for (i = 0; i < adapter->num_rx_desc; i++) { 2935 error = em_get_buf(adapter, i); 2936 if (error) 2937 goto fail; 2938 } 2939 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, 2940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2941 2942 return (0); 2943 2944fail: 2945 em_free_receive_structures(adapter); 2946 return (error); 2947} 2948 2949/********************************************************************* 2950 * 2951 * Allocate and initialize receive structures. 2952 * 2953 **********************************************************************/ 2954static int 2955em_setup_receive_structures(struct adapter *adapter) 2956{ 2957 int error; 2958 2959 bzero(adapter->rx_desc_base, (sizeof(struct em_rx_desc)) * adapter->num_rx_desc); 2960 2961 if ((error = em_allocate_receive_structures(adapter)) != 0) 2962 return (error); 2963 2964 /* Setup our descriptor pointers */ 2965 adapter->next_rx_desc_to_check = 0; 2966 2967 return (0); 2968} 2969 2970/********************************************************************* 2971 * 2972 * Enable receive unit. 2973 * 2974 **********************************************************************/ 2975static void 2976em_initialize_receive_unit(struct adapter *adapter) 2977{ 2978 struct ifnet *ifp = adapter->ifp; 2979 uint64_t bus_addr; 2980 uint32_t reg_rctl; 2981 uint32_t reg_rxcsum; 2982 2983 INIT_DEBUGOUT("em_initialize_receive_unit: begin"); 2984 2985 /* 2986 * Make sure receives are disabled while setting 2987 * up the descriptor ring 2988 */ 2989 E1000_WRITE_REG(&adapter->hw, RCTL, 0); 2990 2991 /* Set the Receive Delay Timer Register */ 2992 E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay.value | E1000_RDT_FPDB); 2993 2994 if(adapter->hw.mac_type >= em_82540) { 2995 E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay.value); 2996 2997 /* 2998 * Set the interrupt throttling rate. 
Value is calculated 2999 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) 3000 */ 3001#define MAX_INTS_PER_SEC 8000 3002#define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256) 3003 E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR); 3004 } 3005 3006 /* Setup the Base and Length of the Rx Descriptor Ring */ 3007 bus_addr = adapter->rxdma.dma_paddr; 3008 E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc * 3009 sizeof(struct em_rx_desc)); 3010 E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32)); 3011 E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr); 3012 3013 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 3014 E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1); 3015 E1000_WRITE_REG(&adapter->hw, RDH, 0); 3016 3017 /* Setup the Receive Control Register */ 3018 reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3019 E1000_RCTL_RDMTS_HALF | 3020 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 3021 3022 if (adapter->hw.tbi_compatibility_on == TRUE) 3023 reg_rctl |= E1000_RCTL_SBP; 3024 3025 3026 switch (adapter->rx_buffer_len) { 3027 default: 3028 case EM_RXBUFFER_2048: 3029 reg_rctl |= E1000_RCTL_SZ_2048; 3030 break; 3031 case EM_RXBUFFER_4096: 3032 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE; 3033 break; 3034 case EM_RXBUFFER_8192: 3035 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE; 3036 break; 3037 case EM_RXBUFFER_16384: 3038 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE; 3039 break; 3040 } 3041 3042 if (ifp->if_mtu > ETHERMTU) 3043 reg_rctl |= E1000_RCTL_LPE; 3044 3045 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 3046 if ((adapter->hw.mac_type >= em_82543) && 3047 (ifp->if_capenable & IFCAP_RXCSUM)) { 3048 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 3049 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); 3050 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum); 3051 } 3052 3053 /* Enable Receives */ 3054 
E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 3055} 3056 3057/********************************************************************* 3058 * 3059 * Free receive related data structures. 3060 * 3061 **********************************************************************/ 3062static void 3063em_free_receive_structures(struct adapter *adapter) 3064{ 3065 struct em_buffer *rx_buffer; 3066 int i; 3067 3068 INIT_DEBUGOUT("free_receive_structures: begin"); 3069 3070 if (adapter->rx_sparemap) { 3071 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap); 3072 adapter->rx_sparemap = NULL; 3073 } 3074 if (adapter->rx_buffer_area != NULL) { 3075 rx_buffer = adapter->rx_buffer_area; 3076 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 3077 if (rx_buffer->m_head != NULL) { 3078 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, 3079 BUS_DMASYNC_POSTREAD); 3080 bus_dmamap_unload(adapter->rxtag, 3081 rx_buffer->map); 3082 m_freem(rx_buffer->m_head); 3083 rx_buffer->m_head = NULL; 3084 } else if (rx_buffer->map != NULL) 3085 bus_dmamap_unload(adapter->rxtag, 3086 rx_buffer->map); 3087 if (rx_buffer->map != NULL) { 3088 bus_dmamap_destroy(adapter->rxtag, 3089 rx_buffer->map); 3090 rx_buffer->map = NULL; 3091 } 3092 } 3093 } 3094 if (adapter->rx_buffer_area != NULL) { 3095 free(adapter->rx_buffer_area, M_DEVBUF); 3096 adapter->rx_buffer_area = NULL; 3097 } 3098 if (adapter->rxtag != NULL) { 3099 bus_dma_tag_destroy(adapter->rxtag); 3100 adapter->rxtag = NULL; 3101 } 3102} 3103 3104/********************************************************************* 3105 * 3106 * This routine executes in interrupt context. It replenishes 3107 * the mbufs in the descriptor and sends data which has been 3108 * dma'ed into host memory to upper layer. 3109 * 3110 * We loop at most count times if count is > 0, or until done if 3111 * count < 0. 
 *
 *********************************************************************/
static int
em_rxeof(struct adapter *adapter, int count)
{
	struct ifnet	*ifp;
	struct mbuf	*mp;
	uint8_t		accept_frame = 0;
	uint8_t		eop = 0;		/* set when descriptor is End Of Packet */
	uint16_t	len, desc_len, prev_len_adj;
	int		i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Nothing to do if the first descriptor has not been written back. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	/*
	 * Process descriptors the hardware has marked done (DD), up to
	 * 'count' completed packets (count < 0 means no limit), while the
	 * interface is still running.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Last fragment of a packet: strip the CRC. */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				/*
				 * CRC spills into the previous descriptor;
				 * remember how much to trim from it.
				 */
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			uint8_t		last_byte;
			uint32_t	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/*
			 * TBI (fiber) workaround: a frame ending in a
			 * carrier-extend byte may be falsely flagged bad;
			 * accept it and drop the trailing byte.
			 */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
			    current_desc->errors,
			    pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac_addr);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/*
			 * Refill the slot with a fresh mbuf before taking
			 * ownership of the received one; on failure the
			 * old mbuf is recycled below.
			 */
			if (em_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Complete packet: hand it to the stack. */
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->hw.max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    em_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			ifp->if_ierrors++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			if (adapter->fmp != NULL) {
				/* Drop any partially assembled packet too. */
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			/* Zero out the receive descriptors status. */
			adapter->rx_desc_base[i].status = 0;
			m = NULL;
		}

		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/*
			 * Save/restore the ring index around if_input();
			 * under DEVICE_POLLING the lock is dropped, so
			 * reread it in case the ring state changed.
			 */
			adapter->next_rx_desc_to_check = i;
#ifdef DEVICE_POLLING
			EM_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_LOCK(adapter);
#else
			(*ifp->if_input)(ifp, m);
#endif
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, RDT, i);
	/* Return 1 if more completed descriptors remain to be serviced. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	return (1);
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign entire payload on
 * architecures with strict alignment. This is serious design mistake of 8254x
 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
 * payload. On architecures without strict alignment restrictions 8254x still
 * performs unaligned memory access which would reduce the performance too.
 * To avoid copying over an entire frame to align, we allocate a new mbuf and
 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
 * existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achived only when jumbo frame is
 * not used at all on architectures with strict alignment.
3303 */ 3304static int 3305em_fixup_rx(struct adapter *adapter) 3306{ 3307 struct mbuf *m, *n; 3308 int error; 3309 3310 error = 0; 3311 m = adapter->fmp; 3312 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { 3313 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); 3314 m->m_data += ETHER_HDR_LEN; 3315 } else { 3316 MGETHDR(n, M_DONTWAIT, MT_DATA); 3317 if (n != NULL) { 3318 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); 3319 m->m_data += ETHER_HDR_LEN; 3320 m->m_len -= ETHER_HDR_LEN; 3321 n->m_len = ETHER_HDR_LEN; 3322 M_MOVE_PKTHDR(n, m); 3323 n->m_next = m; 3324 adapter->fmp = n; 3325 } else { 3326 adapter->ifp->if_iqdrops++; 3327 adapter->mbuf_alloc_failed++; 3328 m_freem(adapter->fmp); 3329 adapter->fmp = NULL; 3330 adapter->lmp = NULL; 3331 error = ENOBUFS; 3332 } 3333 } 3334 3335 return (error); 3336} 3337#endif 3338 3339/********************************************************************* 3340 * 3341 * Verify that the hardware indicated that the checksum is valid. 3342 * Inform the stack about the status of checksum so that stack 3343 * doesn't spend time verifying the checksum. 3344 * 3345 *********************************************************************/ 3346static void 3347em_receive_checksum(struct adapter *adapter, struct em_rx_desc *rx_desc, 3348 struct mbuf *mp) 3349{ 3350 /* 82543 or newer only */ 3351 if ((adapter->hw.mac_type < em_82543) || 3352 /* Ignore Checksum bit is set */ 3353 (rx_desc->status & E1000_RXD_STAT_IXSM)) { 3354 mp->m_pkthdr.csum_flags = 0; 3355 return; 3356 } 3357 3358 if (rx_desc->status & E1000_RXD_STAT_IPCS) { 3359 /* Did it pass? */ 3360 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) { 3361 /* IP Checksum Good */ 3362 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 3363 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3364 3365 } else { 3366 mp->m_pkthdr.csum_flags = 0; 3367 } 3368 } 3369 3370 if (rx_desc->status & E1000_RXD_STAT_TCPCS) { 3371 /* Did it pass? 
*/ 3372 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) { 3373 mp->m_pkthdr.csum_flags |= 3374 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 3375 mp->m_pkthdr.csum_data = htons(0xffff); 3376 } 3377 } 3378} 3379 3380 3381static void 3382em_enable_vlans(struct adapter *adapter) 3383{ 3384 uint32_t ctrl; 3385 3386 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN); 3387 3388 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 3389 ctrl |= E1000_CTRL_VME; 3390 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 3391} 3392 3393static void 3394em_disable_vlans(struct adapter *adapter) 3395{ 3396 uint32_t ctrl; 3397 3398 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 3399 ctrl &= ~E1000_CTRL_VME; 3400 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 3401} 3402 3403static void 3404em_enable_intr(struct adapter *adapter) 3405{ 3406 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK)); 3407} 3408 3409static void 3410em_disable_intr(struct adapter *adapter) 3411{ 3412 /* 3413 * The first version of 82542 had an errata where when link was forced 3414 * it would stay up even up even if the cable was disconnected. 3415 * Sequence errors were used to detect the disconnect and then the 3416 * driver would unforce the link. This code in the in the ISR. For this 3417 * to work correctly the Sequence error interrupt had to be enabled 3418 * all the time. 
3419 */ 3420 3421 if (adapter->hw.mac_type == em_82542_rev2_0) 3422 E1000_WRITE_REG(&adapter->hw, IMC, 3423 (0xffffffff & ~E1000_IMC_RXSEQ)); 3424 else 3425 E1000_WRITE_REG(&adapter->hw, IMC, 3426 0xffffffff); 3427} 3428 3429static int 3430em_is_valid_ether_addr(uint8_t *addr) 3431{ 3432 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 3433 3434 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 3435 return (FALSE); 3436 } 3437 3438 return (TRUE); 3439} 3440 3441void 3442em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value) 3443{ 3444 pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2); 3445} 3446 3447void 3448em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value) 3449{ 3450 *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2); 3451} 3452 3453void 3454em_pci_set_mwi(struct em_hw *hw) 3455{ 3456 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND, 3457 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2); 3458} 3459 3460void 3461em_pci_clear_mwi(struct em_hw *hw) 3462{ 3463 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND, 3464 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2); 3465} 3466 3467/********************************************************************* 3468* 82544 Coexistence issue workaround. 3469* There are 2 issues. 3470* 1. Transmit Hang issue. 3471* To detect this issue, following equation can be used... 3472* SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 3473* If SUM[3:0] is in between 1 to 4, we will have this issue. 3474* 3475* 2. DAC issue. 3476* To detect this issue, following equation can be used... 3477* SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 3478* If SUM[3:0] is in between 9 to c, we will have this issue. 
3479* 3480* 3481* WORKAROUND: 3482* Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC) 3483* 3484*** *********************************************************************/ 3485static uint32_t 3486em_fill_descriptors (bus_addr_t address, uint32_t length, 3487 PDESC_ARRAY desc_array) 3488{ 3489 /* Since issue is sensitive to length and address.*/ 3490 /* Let us first check the address...*/ 3491 uint32_t safe_terminator; 3492 if (length <= 4) { 3493 desc_array->descriptor[0].address = address; 3494 desc_array->descriptor[0].length = length; 3495 desc_array->elements = 1; 3496 return (desc_array->elements); 3497 } 3498 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF); 3499 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */ 3500 if (safe_terminator == 0 || 3501 (safe_terminator > 4 && 3502 safe_terminator < 9) || 3503 (safe_terminator > 0xC && 3504 safe_terminator <= 0xF)) { 3505 desc_array->descriptor[0].address = address; 3506 desc_array->descriptor[0].length = length; 3507 desc_array->elements = 1; 3508 return (desc_array->elements); 3509 } 3510 3511 desc_array->descriptor[0].address = address; 3512 desc_array->descriptor[0].length = length - 4; 3513 desc_array->descriptor[1].address = address + (length - 4); 3514 desc_array->descriptor[1].length = 4; 3515 desc_array->elements = 2; 3516 return (desc_array->elements); 3517} 3518 3519/********************************************************************** 3520 * 3521 * Update the board statistics counters. 
3522 * 3523 **********************************************************************/ 3524static void 3525em_update_stats_counters(struct adapter *adapter) 3526{ 3527 struct ifnet *ifp; 3528 3529 if(adapter->hw.media_type == em_media_type_copper || 3530 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 3531 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS); 3532 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC); 3533 } 3534 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS); 3535 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC); 3536 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC); 3537 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL); 3538 3539 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC); 3540 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL); 3541 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC); 3542 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC); 3543 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC); 3544 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC); 3545 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC); 3546 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC); 3547 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC); 3548 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC); 3549 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64); 3550 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127); 3551 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255); 3552 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511); 3553 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023); 3554 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522); 3555 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC); 3556 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC); 3557 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC); 3558 
adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC); 3559 3560 /* For the 64-bit byte counters the low dword must be read first. */ 3561 /* Both registers clear on the read of the high dword */ 3562 3563 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 3564 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH); 3565 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL); 3566 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH); 3567 3568 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC); 3569 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC); 3570 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC); 3571 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC); 3572 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC); 3573 3574 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL); 3575 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH); 3576 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL); 3577 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH); 3578 3579 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR); 3580 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT); 3581 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64); 3582 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127); 3583 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255); 3584 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511); 3585 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023); 3586 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522); 3587 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC); 3588 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC); 3589 3590 if (adapter->hw.mac_type >= em_82543) { 3591 adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, ALGNERRC); 3592 adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, RXERRC); 3593 adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, TNCRS); 3594 adapter->stats.cexterr 
+= E1000_READ_REG(&adapter->hw, CEXTERR); 3595 adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, TSCTC); 3596 adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, TSCTFC); 3597 } 3598 ifp = adapter->ifp; 3599 3600 ifp->if_collisions = adapter->stats.colc; 3601 3602 /* Rx Errors */ 3603 ifp->if_ierrors = adapter->stats.rxerrc + adapter->stats.crcerrs + 3604 adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + 3605 adapter->stats.mpc + adapter->stats.cexterr; 3606 3607 /* Tx Errors */ 3608 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol + 3609 adapter->watchdog_events; 3610} 3611 3612 3613/********************************************************************** 3614 * 3615 * This routine is called only when em_display_debug_stats is enabled. 3616 * This routine provides a way to take a look at important statistics 3617 * maintained by the driver and hardware. 3618 * 3619 **********************************************************************/ 3620static void 3621em_print_debug_info(struct adapter *adapter) 3622{ 3623 device_t dev = adapter->dev; 3624 uint8_t *hw_addr = adapter->hw.hw_addr; 3625 3626 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3627 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3628 E1000_READ_REG(&adapter->hw, CTRL), 3629 E1000_READ_REG(&adapter->hw, RCTL)); 3630 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3631 ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\ 3632 (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) ); 3633 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3634 adapter->hw.fc_high_water, 3635 adapter->hw.fc_low_water); 3636 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3637 E1000_READ_REG(&adapter->hw, TIDV), 3638 E1000_READ_REG(&adapter->hw, TADV)); 3639 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3640 E1000_READ_REG(&adapter->hw, RDTR), 3641 E1000_READ_REG(&adapter->hw, RADV)); 3642 device_printf(dev, "fifo 
workaround = %lld, fifo_reset_count = %lld\n", 3643 (long long)adapter->tx_fifo_wrk_cnt, 3644 (long long)adapter->tx_fifo_reset_cnt); 3645 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 3646 E1000_READ_REG(&adapter->hw, TDH), 3647 E1000_READ_REG(&adapter->hw, TDT)); 3648 device_printf(dev, "Num Tx descriptors avail = %d\n", 3649 adapter->num_tx_desc_avail); 3650 device_printf(dev, "Tx Descriptors not avail1 = %ld\n", 3651 adapter->no_tx_desc_avail1); 3652 device_printf(dev, "Tx Descriptors not avail2 = %ld\n", 3653 adapter->no_tx_desc_avail2); 3654 device_printf(dev, "Std mbuf failed = %ld\n", 3655 adapter->mbuf_alloc_failed); 3656 device_printf(dev, "Std mbuf cluster failed = %ld\n", 3657 adapter->mbuf_cluster_failed); 3658} 3659 3660static void 3661em_print_hw_stats(struct adapter *adapter) 3662{ 3663 device_t dev = adapter->dev; 3664 3665 device_printf(dev, "Excessive collisions = %lld\n", 3666 (long long)adapter->stats.ecol); 3667 device_printf(dev, "Symbol errors = %lld\n", 3668 (long long)adapter->stats.symerrs); 3669 device_printf(dev, "Sequence errors = %lld\n", 3670 (long long)adapter->stats.sec); 3671 device_printf(dev, "Defer count = %lld\n", (long long)adapter->stats.dc); 3672 3673 device_printf(dev, "Missed Packets = %lld\n", (long long)adapter->stats.mpc); 3674 device_printf(dev, "Receive No Buffers = %lld\n", 3675 (long long)adapter->stats.rnbc); 3676 /* RLEC is inaccurate on some hardware, calculate our own. 
 */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n", (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)adapter->stats.algnerrc);
	device_printf(dev, "Carrier extension errors = %lld\n",
	    (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n", adapter->watchdog_events);

	device_printf(dev, "XON Rcvd = %lld\n", (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n", (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n", (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n", (long long)adapter->stats.xofftxc);

	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)adapter->stats.gptc);
}

/*
 * Sysctl handler: writing 1 to the node dumps the driver debug state
 * via em_print_debug_info().  Reads return -1 and have no effect.
 */
static int
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Read-only access (no new value): nothing to do. */
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_debug_info(adapter);
	}

	return (error);
}


/*
 * Sysctl handler: writing 1 to the node dumps the accumulated hardware
 * statistics via em_print_hw_stats().  Reads return -1 and have no effect.
 */
static int
em_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Read-only access (no new value): nothing to do. */
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		em_print_hw_stats(adapter);
	}

	return (error);
}

/*
 * Sysctl handler for the interrupt-delay knobs registered by
 * em_add_int_delay_sysctl().  Converts the user-supplied microsecond
 * value to device ticks and writes it into the low 16 bits of the
 * associated delay register under the adapter lock.
 */
static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	uint32_t regval;
	int error;
	int usecs;
	int ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* The delay field is 16 bits wide; reject out-of-range values. */
	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = E1000_USECS_TO_TICKS(usecs);

	adapter = info->adapter;

	EM_LOCK(adapter);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
	case E1000_82542_RDTR:
		regval |= E1000_RDT_FPDB;
		break;
	case E1000_TIDV:
	case E1000_82542_TIDV:
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register.
*/ 3779 regval++; 3780 } else 3781 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 3782 break; 3783 } 3784 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); 3785 EM_UNLOCK(adapter); 3786 return (0); 3787} 3788 3789static void 3790em_add_int_delay_sysctl(struct adapter *adapter, const char *name, 3791 const char *description, struct em_int_delay_info *info, 3792 int offset, int value) 3793{ 3794 info->adapter = adapter; 3795 info->offset = offset; 3796 info->value = value; 3797 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), 3798 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 3799 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, 3800 info, 0, em_sysctl_int_delay, "I", description); 3801} 3802 3803#ifndef DEVICE_POLLING 3804static void 3805em_add_int_process_limit(struct adapter *adapter, const char *name, 3806 const char *description, int *limit, int value) 3807{ 3808 *limit = value; 3809 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 3810 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 3811 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); 3812} 3813#endif 3814