34 35#include "opt_inet.h" 36#include "opt_inet6.h" 37 38#ifdef HAVE_KERNEL_OPTION_HEADERS 39#include "opt_device_polling.h" 40#endif 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/bus.h> 45#include <sys/endian.h> 46#include <sys/kernel.h> 47#include <sys/kthread.h> 48#include <sys/malloc.h> 49#include <sys/mbuf.h> 50#include <sys/module.h> 51#include <sys/rman.h> 52#include <sys/socket.h> 53#include <sys/sockio.h> 54#include <sys/sysctl.h> 55#include <sys/taskqueue.h> 56#include <sys/eventhandler.h> 57#include <machine/bus.h> 58#include <machine/resource.h> 59 60#include <net/bpf.h> 61#include <net/ethernet.h> 62#include <net/if.h> 63#include <net/if_arp.h> 64#include <net/if_dl.h> 65#include <net/if_media.h> 66 67#include <net/if_types.h> 68#include <net/if_vlan_var.h> 69 70#include <netinet/in_systm.h> 71#include <netinet/in.h> 72#include <netinet/if_ether.h> 73#include <netinet/ip.h> 74#include <netinet/ip6.h> 75#include <netinet/tcp.h> 76#include <netinet/udp.h> 77 78#include <machine/in_cksum.h> 79#include <dev/led/led.h> 80#include <dev/pci/pcivar.h> 81#include <dev/pci/pcireg.h> 82 83#include "e1000_api.h" 84#include "if_lem.h" 85 86/********************************************************************* 87 * Legacy Em Driver version: 88 *********************************************************************/ 89char lem_driver_version[] = "1.0.6"; 90 91/********************************************************************* 92 * PCI Device ID Table 93 * 94 * Used by probe to select devices to load on 95 * Last field stores an index into e1000_strings 96 * Last entry must be all 0s 97 * 98 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 99 *********************************************************************/ 100 101static em_vendor_info_t lem_vendor_info_array[] = 102{ 103 /* Intel(R) PRO/1000 Network Connection */ 104 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0}, 105 { 0x8086, E1000_DEV_ID_82540EM_LOM, 
PCI_ANY_ID, PCI_ANY_ID, 0}, 106 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0}, 107 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, 108 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, 109 110 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0}, 111 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0}, 112 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, 113 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, 114 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0}, 115 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, 116 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, 117 118 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0}, 119 120 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 121 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 122 123 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 124 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 125 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 126 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, 127 128 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 129 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 130 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 131 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 132 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 133 134 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 135 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 136 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 137 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 138 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 139 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 140 { 0x8086, 
E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0}, 141 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 142 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, 143 PCI_ANY_ID, PCI_ANY_ID, 0}, 144 145 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0}, 146 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, 147 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0}, 148 /* required last entry */ 149 { 0, 0, 0, 0, 0} 150}; 151 152/********************************************************************* 153 * Table of branding strings for all supported NICs. 154 *********************************************************************/ 155 156static char *lem_strings[] = { 157 "Intel(R) PRO/1000 Legacy Network Connection" 158}; 159 160/********************************************************************* 161 * Function prototypes 162 *********************************************************************/ 163static int lem_probe(device_t); 164static int lem_attach(device_t); 165static int lem_detach(device_t); 166static int lem_shutdown(device_t); 167static int lem_suspend(device_t); 168static int lem_resume(device_t); 169static void lem_start(struct ifnet *); 170static void lem_start_locked(struct ifnet *ifp); 171static int lem_ioctl(struct ifnet *, u_long, caddr_t); 172static void lem_init(void *); 173static void lem_init_locked(struct adapter *); 174static void lem_stop(void *); 175static void lem_media_status(struct ifnet *, struct ifmediareq *); 176static int lem_media_change(struct ifnet *); 177static void lem_identify_hardware(struct adapter *); 178static int lem_allocate_pci_resources(struct adapter *); 179static int lem_allocate_irq(struct adapter *adapter); 180static void lem_free_pci_resources(struct adapter *); 181static void lem_local_timer(void *); 182static int lem_hardware_init(struct adapter *); 183static int lem_setup_interface(device_t, struct adapter *); 184static void lem_setup_transmit_structures(struct 
adapter *); 185static void lem_initialize_transmit_unit(struct adapter *); 186static int lem_setup_receive_structures(struct adapter *); 187static void lem_initialize_receive_unit(struct adapter *); 188static void lem_enable_intr(struct adapter *); 189static void lem_disable_intr(struct adapter *); 190static void lem_free_transmit_structures(struct adapter *); 191static void lem_free_receive_structures(struct adapter *); 192static void lem_update_stats_counters(struct adapter *); 193static void lem_add_hw_stats(struct adapter *adapter); 194static void lem_txeof(struct adapter *); 195static void lem_tx_purge(struct adapter *); 196static int lem_allocate_receive_structures(struct adapter *); 197static int lem_allocate_transmit_structures(struct adapter *); 198static bool lem_rxeof(struct adapter *, int, int *); 199#ifndef __NO_STRICT_ALIGNMENT 200static int lem_fixup_rx(struct adapter *); 201#endif 202static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *, 203 struct mbuf *); 204static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *, 205 u32 *, u32 *); 206static void lem_set_promisc(struct adapter *); 207static void lem_disable_promisc(struct adapter *); 208static void lem_set_multi(struct adapter *); 209static void lem_update_link_status(struct adapter *); 210static int lem_get_buf(struct adapter *, int); 211static void lem_register_vlan(void *, struct ifnet *, u16); 212static void lem_unregister_vlan(void *, struct ifnet *, u16); 213static void lem_setup_vlan_hw_support(struct adapter *); 214static int lem_xmit(struct adapter *, struct mbuf **); 215static void lem_smartspeed(struct adapter *); 216static int lem_82547_fifo_workaround(struct adapter *, int); 217static void lem_82547_update_fifo_head(struct adapter *, int); 218static int lem_82547_tx_fifo_reset(struct adapter *); 219static void lem_82547_move_tail(void *); 220static int lem_dma_malloc(struct adapter *, bus_size_t, 221 struct em_dma_alloc *, int); 222static void 
lem_dma_free(struct adapter *, struct em_dma_alloc *); 223static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); 224static void lem_print_nvm_info(struct adapter *); 225static int lem_is_valid_ether_addr(u8 *); 226static u32 lem_fill_descriptors (bus_addr_t address, u32 length, 227 PDESC_ARRAY desc_array); 228static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS); 229static void lem_add_int_delay_sysctl(struct adapter *, const char *, 230 const char *, struct em_int_delay_info *, int, int); 231static void lem_set_flow_cntrl(struct adapter *, const char *, 232 const char *, int *, int); 233/* Management and WOL Support */ 234static void lem_init_manageability(struct adapter *); 235static void lem_release_manageability(struct adapter *); 236static void lem_get_hw_control(struct adapter *); 237static void lem_release_hw_control(struct adapter *); 238static void lem_get_wakeup(device_t); 239static void lem_enable_wakeup(device_t); 240static int lem_enable_phy_wakeup(struct adapter *); 241static void lem_led_func(void *, int); 242 243static void lem_intr(void *); 244static int lem_irq_fast(void *); 245static void lem_handle_rxtx(void *context, int pending); 246static void lem_handle_link(void *context, int pending); 247static void lem_add_rx_process_limit(struct adapter *, const char *, 248 const char *, int *, int); 249 250#ifdef DEVICE_POLLING 251static poll_handler_t lem_poll; 252#endif /* POLLING */ 253 254/********************************************************************* 255 * FreeBSD Device Interface Entry Points 256 *********************************************************************/ 257 258static device_method_t lem_methods[] = { 259 /* Device interface */ 260 DEVMETHOD(device_probe, lem_probe), 261 DEVMETHOD(device_attach, lem_attach), 262 DEVMETHOD(device_detach, lem_detach), 263 DEVMETHOD(device_shutdown, lem_shutdown), 264 DEVMETHOD(device_suspend, lem_suspend), 265 DEVMETHOD(device_resume, lem_resume), 266 DEVMETHOD_END 267}; 268 269static driver_t 
lem_driver = { 270 "em", lem_methods, sizeof(struct adapter), 271}; 272 273extern devclass_t em_devclass; 274DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0); 275MODULE_DEPEND(lem, pci, 1, 1, 1); 276MODULE_DEPEND(lem, ether, 1, 1, 1); 277 278/********************************************************************* 279 * Tunable default values. 280 *********************************************************************/ 281 282#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) 283#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) 284 285#define MAX_INTS_PER_SEC 8000 286#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256)) 287 288static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV); 289static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR); 290static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV); 291static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV); 292static int lem_rxd = EM_DEFAULT_RXD; 293static int lem_txd = EM_DEFAULT_TXD; 294static int lem_smart_pwr_down = FALSE; 295 296/* Controls whether promiscuous also shows bad packets */ 297static int lem_debug_sbp = FALSE; 298 299TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt); 300TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt); 301TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt); 302TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt); 303TUNABLE_INT("hw.em.rxd", &lem_rxd); 304TUNABLE_INT("hw.em.txd", &lem_txd); 305TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down); 306TUNABLE_INT("hw.em.sbp", &lem_debug_sbp); 307 308/* Interrupt style - default to fast */ 309static int lem_use_legacy_irq = 0; 310TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq); 311 312/* How many packets rxeof tries to clean at a time */ 313static int lem_rx_process_limit = 100; 314TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit); 315 316/* Flow control setting - default to FULL */ 317static int 
lem_fc_setting = e1000_fc_full; 318TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting); 319 320/* Global used in WOL setup with multiport cards */ 321static int global_quad_port_a = 0; 322 323#ifdef DEV_NETMAP /* see ixgbe.c for details */ 324#include <dev/netmap/if_lem_netmap.h> 325#endif /* DEV_NETMAP */ 326 327/********************************************************************* 328 * Device identification routine 329 * 330 * em_probe determines if the driver should be loaded on 331 * adapter based on PCI vendor/device id of the adapter. 332 * 333 * return BUS_PROBE_DEFAULT on success, positive on failure 334 *********************************************************************/ 335 336static int 337lem_probe(device_t dev) 338{ 339 char adapter_name[60]; 340 u16 pci_vendor_id = 0; 341 u16 pci_device_id = 0; 342 u16 pci_subvendor_id = 0; 343 u16 pci_subdevice_id = 0; 344 em_vendor_info_t *ent; 345 346 INIT_DEBUGOUT("em_probe: begin"); 347 348 pci_vendor_id = pci_get_vendor(dev); 349 if (pci_vendor_id != EM_VENDOR_ID) 350 return (ENXIO); 351 352 pci_device_id = pci_get_device(dev); 353 pci_subvendor_id = pci_get_subvendor(dev); 354 pci_subdevice_id = pci_get_subdevice(dev); 355 356 ent = lem_vendor_info_array; 357 while (ent->vendor_id != 0) { 358 if ((pci_vendor_id == ent->vendor_id) && 359 (pci_device_id == ent->device_id) && 360 361 ((pci_subvendor_id == ent->subvendor_id) || 362 (ent->subvendor_id == PCI_ANY_ID)) && 363 364 ((pci_subdevice_id == ent->subdevice_id) || 365 (ent->subdevice_id == PCI_ANY_ID))) { 366 sprintf(adapter_name, "%s %s", 367 lem_strings[ent->index], 368 lem_driver_version); 369 device_set_desc_copy(dev, adapter_name); 370 return (BUS_PROBE_DEFAULT); 371 } 372 ent++; 373 } 374 375 return (ENXIO); 376} 377 378/********************************************************************* 379 * Device initialization routine 380 * 381 * The attach entry point is called when the driver is being loaded. 
382 * This routine identifies the type of hardware, allocates all resources 383 * and initializes the hardware. 384 * 385 * return 0 on success, positive on failure 386 *********************************************************************/ 387 388static int 389lem_attach(device_t dev) 390{ 391 struct adapter *adapter; 392 int tsize, rsize; 393 int error = 0; 394 395 INIT_DEBUGOUT("lem_attach: begin"); 396 397 adapter = device_get_softc(dev); 398 adapter->dev = adapter->osdep.dev = dev; 399 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); 400 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev)); 401 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev)); 402 403 /* SYSCTL stuff */ 404 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 405 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 406 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 407 lem_sysctl_nvm_info, "I", "NVM Information"); 408 409 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); 410 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0); 411 412 /* Determine hardware and mac info */ 413 lem_identify_hardware(adapter); 414 415 /* Setup PCI resources */ 416 if (lem_allocate_pci_resources(adapter)) { 417 device_printf(dev, "Allocation of PCI resources failed\n"); 418 error = ENXIO; 419 goto err_pci; 420 } 421 422 /* Do Shared Code initialization */ 423 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { 424 device_printf(dev, "Setup of Shared code failed\n"); 425 error = ENXIO; 426 goto err_pci; 427 } 428 429 e1000_get_bus_info(&adapter->hw); 430 431 /* Set up some sysctls for the tunable interrupt delays */ 432 lem_add_int_delay_sysctl(adapter, "rx_int_delay", 433 "receive interrupt delay in usecs", &adapter->rx_int_delay, 434 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt); 435 lem_add_int_delay_sysctl(adapter, "tx_int_delay", 436 "transmit interrupt delay in usecs", &adapter->tx_int_delay, 437 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt); 438 if 
(adapter->hw.mac.type >= e1000_82540) { 439 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay", 440 "receive interrupt delay limit in usecs", 441 &adapter->rx_abs_int_delay, 442 E1000_REGISTER(&adapter->hw, E1000_RADV), 443 lem_rx_abs_int_delay_dflt); 444 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay", 445 "transmit interrupt delay limit in usecs", 446 &adapter->tx_abs_int_delay, 447 E1000_REGISTER(&adapter->hw, E1000_TADV), 448 lem_tx_abs_int_delay_dflt); 449 lem_add_int_delay_sysctl(adapter, "itr", 450 "interrupt delay limit in usecs/4", 451 &adapter->tx_itr, 452 E1000_REGISTER(&adapter->hw, E1000_ITR), 453 DEFAULT_ITR); 454 } 455 456 /* Sysctls for limiting the amount of work done in the taskqueue */ 457 lem_add_rx_process_limit(adapter, "rx_processing_limit", 458 "max number of rx packets to process", &adapter->rx_process_limit, 459 lem_rx_process_limit); 460 461 /* Sysctl for setting the interface flow control */ 462 lem_set_flow_cntrl(adapter, "flow_control", 463 "flow control setting", 464 &adapter->fc_setting, lem_fc_setting); 465 466 /* 467 * Validate number of transmit and receive descriptors. It 468 * must not exceed hardware maximum, and must be multiple 469 * of E1000_DBA_ALIGN. 
470 */ 471 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 || 472 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) || 473 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) || 474 (lem_txd < EM_MIN_TXD)) { 475 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 476 EM_DEFAULT_TXD, lem_txd); 477 adapter->num_tx_desc = EM_DEFAULT_TXD; 478 } else 479 adapter->num_tx_desc = lem_txd; 480 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 || 481 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) || 482 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) || 483 (lem_rxd < EM_MIN_RXD)) { 484 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 485 EM_DEFAULT_RXD, lem_rxd); 486 adapter->num_rx_desc = EM_DEFAULT_RXD; 487 } else 488 adapter->num_rx_desc = lem_rxd; 489 490 adapter->hw.mac.autoneg = DO_AUTO_NEG; 491 adapter->hw.phy.autoneg_wait_to_complete = FALSE; 492 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 493 adapter->rx_buffer_len = 2048; 494 495 e1000_init_script_state_82541(&adapter->hw, TRUE); 496 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE); 497 498 /* Copper options */ 499 if (adapter->hw.phy.media_type == e1000_media_type_copper) { 500 adapter->hw.phy.mdix = AUTO_ALL_MODES; 501 adapter->hw.phy.disable_polarity_correction = FALSE; 502 adapter->hw.phy.ms_type = EM_MASTER_SLAVE; 503 } 504 505 /* 506 * Set the frame limits assuming 507 * standard ethernet sized frames. 508 */ 509 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; 510 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; 511 512 /* 513 * This controls when hardware reports transmit completion 514 * status. 
515 */ 516 adapter->hw.mac.report_tx_early = 1; 517 518 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc), 519 EM_DBA_ALIGN); 520 521 /* Allocate Transmit Descriptor ring */ 522 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { 523 device_printf(dev, "Unable to allocate tx_desc memory\n"); 524 error = ENOMEM; 525 goto err_tx_desc; 526 } 527 adapter->tx_desc_base = 528 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr; 529 530 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc), 531 EM_DBA_ALIGN); 532 533 /* Allocate Receive Descriptor ring */ 534 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { 535 device_printf(dev, "Unable to allocate rx_desc memory\n"); 536 error = ENOMEM; 537 goto err_rx_desc; 538 } 539 adapter->rx_desc_base = 540 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr; 541 542 /* Allocate multicast array memory. */ 543 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * 544 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); 545 if (adapter->mta == NULL) { 546 device_printf(dev, "Can not allocate multicast setup array\n"); 547 error = ENOMEM; 548 goto err_hw_init; 549 } 550 551 /* 552 ** Start from a known state, this is 553 ** important in reading the nvm and 554 ** mac from that. 555 */ 556 e1000_reset_hw(&adapter->hw); 557 558 /* Make sure we have a good EEPROM before we read from it */ 559 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { 560 /* 561 ** Some PCI-E parts fail the first check due to 562 ** the link being in sleep state, call it again, 563 ** if it fails a second time its a real issue. 
564 */ 565 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { 566 device_printf(dev, 567 "The EEPROM Checksum Is Not Valid\n"); 568 error = EIO; 569 goto err_hw_init; 570 } 571 } 572 573 /* Copy the permanent MAC address out of the EEPROM */ 574 if (e1000_read_mac_addr(&adapter->hw) < 0) { 575 device_printf(dev, "EEPROM read error while reading MAC" 576 " address\n"); 577 error = EIO; 578 goto err_hw_init; 579 } 580 581 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) { 582 device_printf(dev, "Invalid MAC address\n"); 583 error = EIO; 584 goto err_hw_init; 585 } 586 587 /* Initialize the hardware */ 588 if (lem_hardware_init(adapter)) { 589 device_printf(dev, "Unable to initialize the hardware\n"); 590 error = EIO; 591 goto err_hw_init; 592 } 593 594 /* Allocate transmit descriptors and buffers */ 595 if (lem_allocate_transmit_structures(adapter)) { 596 device_printf(dev, "Could not setup transmit structures\n"); 597 error = ENOMEM; 598 goto err_tx_struct; 599 } 600 601 /* Allocate receive descriptors and buffers */ 602 if (lem_allocate_receive_structures(adapter)) { 603 device_printf(dev, "Could not setup receive structures\n"); 604 error = ENOMEM; 605 goto err_rx_struct; 606 } 607 608 /* 609 ** Do interrupt configuration 610 */ 611 error = lem_allocate_irq(adapter); 612 if (error) 613 goto err_rx_struct; 614 615 /* 616 * Get Wake-on-Lan and Management info for later use 617 */ 618 lem_get_wakeup(dev); 619 620 /* Setup OS specific network interface */ 621 if (lem_setup_interface(dev, adapter) != 0) 622 goto err_rx_struct; 623 624 /* Initialize statistics */ 625 lem_update_stats_counters(adapter); 626 627 adapter->hw.mac.get_link_status = 1; 628 lem_update_link_status(adapter); 629 630 /* Indicate SOL/IDER usage */ 631 if (e1000_check_reset_block(&adapter->hw)) 632 device_printf(dev, 633 "PHY reset is blocked due to SOL/IDER session.\n"); 634 635 /* Do we need workaround for 82544 PCI-X adapter? 
*/ 636 if (adapter->hw.bus.type == e1000_bus_type_pcix && 637 adapter->hw.mac.type == e1000_82544) 638 adapter->pcix_82544 = TRUE; 639 else 640 adapter->pcix_82544 = FALSE; 641 642 /* Register for VLAN events */ 643 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 644 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); 645 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 646 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 647 648 lem_add_hw_stats(adapter); 649 650 /* Non-AMT based hardware can now take control from firmware */ 651 if (adapter->has_manage && !adapter->has_amt) 652 lem_get_hw_control(adapter); 653 654 /* Tell the stack that the interface is not active */ 655 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 656 657 adapter->led_dev = led_create(lem_led_func, adapter, 658 device_get_nameunit(dev)); 659 660#ifdef DEV_NETMAP 661 lem_netmap_attach(adapter); 662#endif /* DEV_NETMAP */ 663 INIT_DEBUGOUT("lem_attach: end"); 664 665 return (0); 666 667err_rx_struct: 668 lem_free_transmit_structures(adapter); 669err_tx_struct: 670err_hw_init: 671 lem_release_hw_control(adapter); 672 lem_dma_free(adapter, &adapter->rxdma); 673err_rx_desc: 674 lem_dma_free(adapter, &adapter->txdma); 675err_tx_desc: 676err_pci: 677 if (adapter->ifp != NULL) 678 if_free(adapter->ifp); 679 lem_free_pci_resources(adapter); 680 free(adapter->mta, M_DEVBUF); 681 EM_TX_LOCK_DESTROY(adapter); 682 EM_RX_LOCK_DESTROY(adapter); 683 EM_CORE_LOCK_DESTROY(adapter); 684 685 return (error); 686} 687 688/********************************************************************* 689 * Device removal routine 690 * 691 * The detach entry point is called when the driver is being removed. 692 * This routine stops the adapter and deallocates all the resources 693 * that were allocated for driver operation. 
694 * 695 * return 0 on success, positive on failure 696 *********************************************************************/ 697 698static int 699lem_detach(device_t dev) 700{ 701 struct adapter *adapter = device_get_softc(dev); 702 struct ifnet *ifp = adapter->ifp; 703 704 INIT_DEBUGOUT("em_detach: begin"); 705 706 /* Make sure VLANS are not using driver */ 707 if (adapter->ifp->if_vlantrunk != NULL) { 708 device_printf(dev,"Vlan in use, detach first\n"); 709 return (EBUSY); 710 } 711 712#ifdef DEVICE_POLLING 713 if (ifp->if_capenable & IFCAP_POLLING) 714 ether_poll_deregister(ifp); 715#endif 716 717 if (adapter->led_dev != NULL) 718 led_destroy(adapter->led_dev); 719 720 EM_CORE_LOCK(adapter); 721 EM_TX_LOCK(adapter); 722 adapter->in_detach = 1; 723 lem_stop(adapter); 724 e1000_phy_hw_reset(&adapter->hw); 725 726 lem_release_manageability(adapter); 727 728 EM_TX_UNLOCK(adapter); 729 EM_CORE_UNLOCK(adapter); 730 731 /* Unregister VLAN events */ 732 if (adapter->vlan_attach != NULL) 733 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); 734 if (adapter->vlan_detach != NULL) 735 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); 736 737 ether_ifdetach(adapter->ifp); 738 callout_drain(&adapter->timer); 739 callout_drain(&adapter->tx_fifo_timer); 740 741#ifdef DEV_NETMAP 742 netmap_detach(ifp); 743#endif /* DEV_NETMAP */ 744 lem_free_pci_resources(adapter); 745 bus_generic_detach(dev); 746 if_free(ifp); 747 748 lem_free_transmit_structures(adapter); 749 lem_free_receive_structures(adapter); 750 751 /* Free Transmit Descriptor ring */ 752 if (adapter->tx_desc_base) { 753 lem_dma_free(adapter, &adapter->txdma); 754 adapter->tx_desc_base = NULL; 755 } 756 757 /* Free Receive Descriptor ring */ 758 if (adapter->rx_desc_base) { 759 lem_dma_free(adapter, &adapter->rxdma); 760 adapter->rx_desc_base = NULL; 761 } 762 763 lem_release_hw_control(adapter); 764 free(adapter->mta, M_DEVBUF); 765 EM_TX_LOCK_DESTROY(adapter); 766 EM_RX_LOCK_DESTROY(adapter); 
767 EM_CORE_LOCK_DESTROY(adapter); 768 769 return (0); 770} 771 772/********************************************************************* 773 * 774 * Shutdown entry point 775 * 776 **********************************************************************/ 777 778static int 779lem_shutdown(device_t dev) 780{ 781 return lem_suspend(dev); 782} 783 784/* 785 * Suspend/resume device methods. 786 */ 787static int 788lem_suspend(device_t dev) 789{ 790 struct adapter *adapter = device_get_softc(dev); 791 792 EM_CORE_LOCK(adapter); 793 794 lem_release_manageability(adapter); 795 lem_release_hw_control(adapter); 796 lem_enable_wakeup(dev); 797 798 EM_CORE_UNLOCK(adapter); 799 800 return bus_generic_suspend(dev); 801} 802 803static int 804lem_resume(device_t dev) 805{ 806 struct adapter *adapter = device_get_softc(dev); 807 struct ifnet *ifp = adapter->ifp; 808 809 EM_CORE_LOCK(adapter); 810 lem_init_locked(adapter); 811 lem_init_manageability(adapter); 812 EM_CORE_UNLOCK(adapter); 813 lem_start(ifp); 814 815 return bus_generic_resume(dev); 816} 817 818 819static void 820lem_start_locked(struct ifnet *ifp) 821{ 822 struct adapter *adapter = ifp->if_softc; 823 struct mbuf *m_head; 824 825 EM_TX_LOCK_ASSERT(adapter); 826 827 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 828 IFF_DRV_RUNNING) 829 return; 830 if (!adapter->link_active) 831 return; 832 833 /* 834 * Force a cleanup if number of TX descriptors 835 * available hits the threshold 836 */ 837 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) { 838 lem_txeof(adapter); 839 /* Now do we at least have a minimal? */ 840 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) { 841 adapter->no_tx_desc_avail1++; 842 return; 843 } 844 } 845 846 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 847 848 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 849 if (m_head == NULL) 850 break; 851 /* 852 * Encapsulation can modify our pointer, and or make it 853 * NULL on failure. In that event, we can't requeue. 
	 */
	if (lem_xmit(adapter, &m_head)) {
		/* NULLed head means the mbuf chain was consumed/freed */
		if (m_head == NULL)
			break;
		/* Out of descriptors: requeue and mark TX as backed up */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
		break;
	}

	/* Send a copy of the frame to the BPF listener */
	ETHER_BPF_MTAP(ifp, m_head);

	/* Set timeout in case hardware has problems transmitting. */
	adapter->watchdog_check = TRUE;
	adapter->watchdog_time = ticks;
	}
	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return;
}

/*
 * if_start entry point: takes the TX lock and defers to the
 * locked start routine when the interface is running.
 */
static void
lem_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	EM_TX_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	bool		avoid_reset = FALSE;
	int		error = 0;

	/* Device is going away; refuse further configuration */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* 82542 has no jumbo-frame support; cap at standard Ethernet */
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only PROMISC/ALLMULTI changed: adjust the
				 * filters without a full reinit.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			/* 82542 rev 2 needs the RX unit reprogrammed */
			if (adapter->hw.mac.type == e1000_82542 &&
	    		    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  Caller must hold the CORE lock (asserted below).
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
	    ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			lem_setup_vlan_hw_support(adapter);
		else {
			u32 ctrl;
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
		}
	}

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);
}

/*
 * Unlocked init wrapper: stack-facing if_init entry point.
 */
static void
lem_init(void *arg)
{
	struct adapter *adapter = arg;

	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}


#ifdef DEVICE_POLLING
/*********************************************************************
 *
 *  Legacy polling routine
 *
 *********************************************************************/
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr, rx_done = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR clears it; look for link-change events */
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			lem_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    lem_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	lem_rxeof(adapter, count, &rx_done);

	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
#endif /* DEVICE_POLLING */

/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 *********************************************************************/
static void
lem_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	if ((ifp->if_capenable & IFCAP_POLLING) ||
	    ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
		return;

	EM_CORE_LOCK(adapter);
	/* Reading ICR acknowledges/clears pending interrupt causes */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	/*
	 * NOTE(review): the overrun counter is bumped before the
	 * stray/hot-eject check below, so an all-ones read (card gone)
	 * also counts as an overrun -- confirm this is intentional.
	 */
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	/* All-ones means hot eject; zero means not our interrupt */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		lem_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		lem_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    lem_local_timer, adapter);
		EM_CORE_UNLOCK(adapter);
		return;
	}

	EM_CORE_UNLOCK(adapter);
	lem_rxeof(adapter, -1, NULL);

	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return;
}


/*
 * Taskqueue handler for deferred link-state processing
 * (queued from the fast interrupt handler).
 */
static void
lem_handle_link(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet *ifp = adapter->ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	EM_CORE_LOCK(adapter);
	callout_stop(&adapter->timer);
	lem_update_link_status(adapter);
	/* Deal with TX cruft when link lost */
	lem_tx_purge(adapter);
	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	EM_CORE_UNLOCK(adapter);
}


/* Combined RX/TX handler, used by Legacy and MSI */
static void
lem_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
		EM_TX_LOCK(adapter);
		lem_txeof(adapter);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			lem_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
		/* More RX work pending: requeue ourselves, intr stays masked */
		if (more) {
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
			return;
		}
	}

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		lem_enable_intr(adapter);
}

/*********************************************************************
 *
 *  Fast Legacy/MSI Combined Interrupt Service routine
 *
 *********************************************************************/
static int
lem_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	ifp = adapter->ifp;

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	lem_disable_intr(adapter);
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	INIT_DEBUGOUT("lem_media_status: begin");

	EM_CORE_LOCK(adapter);
	lem_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		/* 82545 fiber parts report 1000LX instead of 1000SX */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	EM_CORE_UNLOCK(adapter);
}

/*********************************************************************
 *
 *
Media Ioctl callback 1466 * 1467 * This routine is called when the user changes speed/duplex using 1468 * media/mediopt option with ifconfig. 1469 * 1470 **********************************************************************/ 1471static int 1472lem_media_change(struct ifnet *ifp) 1473{ 1474 struct adapter *adapter = ifp->if_softc; 1475 struct ifmedia *ifm = &adapter->media; 1476 1477 INIT_DEBUGOUT("lem_media_change: begin"); 1478 1479 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1480 return (EINVAL); 1481 1482 EM_CORE_LOCK(adapter); 1483 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1484 case IFM_AUTO: 1485 adapter->hw.mac.autoneg = DO_AUTO_NEG; 1486 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1487 break; 1488 case IFM_1000_LX: 1489 case IFM_1000_SX: 1490 case IFM_1000_T: 1491 adapter->hw.mac.autoneg = DO_AUTO_NEG; 1492 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1493 break; 1494 case IFM_100_TX: 1495 adapter->hw.mac.autoneg = FALSE; 1496 adapter->hw.phy.autoneg_advertised = 0; 1497 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1498 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1499 else 1500 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1501 break; 1502 case IFM_10_T: 1503 adapter->hw.mac.autoneg = FALSE; 1504 adapter->hw.phy.autoneg_advertised = 0; 1505 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1506 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1507 else 1508 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1509 break; 1510 default: 1511 device_printf(adapter->dev, "Unsupported media type\n"); 1512 } 1513 1514 lem_init_locked(adapter); 1515 EM_CORE_UNLOCK(adapter); 1516 1517 return (0); 1518} 1519 1520/********************************************************************* 1521 * 1522 * This routine maps the mbufs to tx descriptors. 
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			error, nsegs, i, j, first, last = 0;

	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

	/*
	** When doing checksum offload, it is critical to
	** make sure the first mbuf has more than header,
	** because that routine expects data to be present.
	*/
	if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
	    (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
		m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
		*m_headp = m_head;
		if (m_head == NULL)
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/* Keep two descriptors in reserve (ring full heuristic) */
	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
	}
	m_head = *m_headp;

	/* Do hardware assists */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
		lem_transmit_checksum_setup(adapter, m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = lem_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Ring exhausted: roll back to saved index */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len = segs[j].ds_len;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(
			    adapter->txd_cmd | txd_lower | seg_len);
			ctxd->upper.data =
			    htole32(txd_upper);
			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;

	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/*
	 * The mbuf/map pair is stored on the LAST buffer; swap the
	 * first buffer's (unused) map into that slot so the map that
	 * actually holds the mapping travels with the mbuf.
	 */
	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;
	adapter->watchdog_time = ticks;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		lem_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			lem_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

	return (0);
}

/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	/* Walk descriptors hardware has not yet seen, one packet at a time */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (lem_82547_fifo_workaround(adapter, length)) {
				/* FIFO not quiescent: retry on next tick */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    lem_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			lem_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}

/*
 * Returns 1 when the packet would wrap the TX FIFO boundary and the
 * FIFO could not be reset; 0 when it is safe to advance the tail.
 */
static int
lem_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (lem_82547_tx_fifo_reset(adapter))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}

/* Advance the software copy of the TX FIFO head, wrapping at FIFO size. */
static void
lem_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	}
}


/*
 * Reset the TX FIFO pointers, but only when the FIFO is fully drained
 * (ring empty and internal FIFO head/tail/saved pointers agree, with
 * no packets pending).  Returns TRUE when the reset was performed.
 */
static int
lem_82547_tx_fifo_reset(struct adapter *adapter)
{
	u32 tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}
1848static void 1849lem_set_promisc(struct adapter *adapter) 1850{ 1851 struct ifnet *ifp = adapter->ifp; 1852 u32 reg_rctl; 1853 1854 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1855 1856 if (ifp->if_flags & IFF_PROMISC) { 1857 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1858 /* Turn this on if you want to see bad packets */ 1859 if (lem_debug_sbp) 1860 reg_rctl |= E1000_RCTL_SBP; 1861 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1862 } else if (ifp->if_flags & IFF_ALLMULTI) { 1863 reg_rctl |= E1000_RCTL_MPE; 1864 reg_rctl &= ~E1000_RCTL_UPE; 1865 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1866 } 1867} 1868 1869static void 1870lem_disable_promisc(struct adapter *adapter) 1871{ 1872 struct ifnet *ifp = adapter->ifp; 1873 u32 reg_rctl; 1874 int mcnt = 0; 1875 1876 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1877 reg_rctl &= (~E1000_RCTL_UPE); 1878 if (ifp->if_flags & IFF_ALLMULTI) 1879 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 1880 else { 1881 struct ifmultiaddr *ifma; 1882#if __FreeBSD_version < 800000 1883 IF_ADDR_LOCK(ifp); 1884#else 1885 if_maddr_rlock(ifp); 1886#endif 1887 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1888 if (ifma->ifma_addr->sa_family != AF_LINK) 1889 continue; 1890 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1891 break; 1892 mcnt++; 1893 } 1894#if __FreeBSD_version < 800000 1895 IF_ADDR_UNLOCK(ifp); 1896#else 1897 if_maddr_runlock(ifp); 1898#endif 1899 } 1900 /* Don't disable if in MAX groups */ 1901 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 1902 reg_rctl &= (~E1000_RCTL_MPE); 1903 reg_rctl &= (~E1000_RCTL_SBP); 1904 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1905} 1906 1907 1908/********************************************************************* 1909 * Multicast Update 1910 * 1911 * This routine is called whenever multicast address list is updated. 
 *
 **********************************************************************/

static void
lem_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("lem_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * 82542 rev 2 errata: the RX unit must be held in reset (and MWI
	 * disabled) while the multicast table is rewritten.
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	/* Gather link-layer multicast addresses into the flat mta array */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif
	/* Too many groups for the hardware table: go multicast-promiscuous */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* Release the 82542 rev 2 RX reset and restore MWI */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
lem_local_timer(void *arg)
{
	struct adapter	*adapter = arg;

	EM_CORE_LOCK_ASSERT(adapter);

	lem_update_link_status(adapter);
	lem_update_stats_counters(adapter);

	lem_smartspeed(adapter);

	/*
	 * We check the watchdog: the time since
	 * the last TX descriptor was cleaned.
	 * This implies a functional TX engine.
	 */
	if ((adapter->watchdog_check == TRUE) &&
	    (ticks - adapter->watchdog_time > EM_WATCHDOG))
		goto hung;

	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	return;
hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	lem_init_locked(adapter);
}

/*
 * Refresh cached link state from the PHY/MAC and propagate
 * transitions (up/down, speed, duplex) to the network stack.
 */
static void
lem_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
		    E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 *  This routine should always be called with BOTH the CORE
 *  and TX locks.
2082 **********************************************************************/ 2083 2084static void 2085lem_stop(void *arg) 2086{ 2087 struct adapter *adapter = arg; 2088 struct ifnet *ifp = adapter->ifp; 2089 2090 EM_CORE_LOCK_ASSERT(adapter); 2091 EM_TX_LOCK_ASSERT(adapter); 2092 2093 INIT_DEBUGOUT("lem_stop: begin"); 2094 2095 lem_disable_intr(adapter); 2096 callout_stop(&adapter->timer); 2097 callout_stop(&adapter->tx_fifo_timer); 2098 2099 /* Tell the stack that the interface is no longer active */ 2100 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2101 2102 e1000_reset_hw(&adapter->hw); 2103 if (adapter->hw.mac.type >= e1000_82544) 2104 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2105 2106 e1000_led_off(&adapter->hw); 2107 e1000_cleanup_led(&adapter->hw); 2108} 2109 2110 2111/********************************************************************* 2112 * 2113 * Determine hardware revision. 2114 * 2115 **********************************************************************/ 2116static void 2117lem_identify_hardware(struct adapter *adapter) 2118{ 2119 device_t dev = adapter->dev; 2120 2121 /* Make sure our PCI config space has the necessary stuff set */
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/in_cksum.h>
#include <dev/led/led.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "e1000_api.h"
#include "if_lem.h"

/*********************************************************************
 *  Legacy Em Driver version:
 *********************************************************************/
char lem_driver_version[] = "1.0.6";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into e1000_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static em_vendor_info_t lem_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char *lem_strings[] = {
	"Intel(R) PRO/1000 Legacy Network Connection"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	lem_probe(device_t);
static int	lem_attach(device_t);
static int	lem_detach(device_t);
static int	lem_shutdown(device_t);
static int	lem_suspend(device_t);
static int	lem_resume(device_t);
static void	lem_start(struct ifnet *);
static void	lem_start_locked(struct ifnet *ifp);
static int	lem_ioctl(struct ifnet *, u_long, caddr_t);
static void	lem_init(void *);
static void	lem_init_locked(struct adapter *);
static void	lem_stop(void *);
static void	lem_media_status(struct ifnet *, struct ifmediareq *);
static int	lem_media_change(struct ifnet *);
static void	lem_identify_hardware(struct adapter *);
static int	lem_allocate_pci_resources(struct adapter *);
static int	lem_allocate_irq(struct adapter *adapter);
static void	lem_free_pci_resources(struct adapter *);
static void	lem_local_timer(void *);
static int	lem_hardware_init(struct adapter *);
static int	lem_setup_interface(device_t, struct adapter *);
static void	lem_setup_transmit_structures(struct adapter *);
static void	lem_initialize_transmit_unit(struct adapter *);
static int	lem_setup_receive_structures(struct adapter *);
static void	lem_initialize_receive_unit(struct adapter *);
static void	lem_enable_intr(struct adapter *);
static void	lem_disable_intr(struct adapter *);
static void	lem_free_transmit_structures(struct adapter *);
static void	lem_free_receive_structures(struct adapter *);
static void	lem_update_stats_counters(struct adapter *);
static void	lem_add_hw_stats(struct adapter *adapter);
static void	lem_txeof(struct adapter *);
static void	lem_tx_purge(struct adapter *);
static int	lem_allocate_receive_structures(struct adapter *);
static int	lem_allocate_transmit_structures(struct adapter *);
static bool	lem_rxeof(struct adapter *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int	lem_fixup_rx(struct adapter *);
#endif
static void	lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
static void	lem_set_promisc(struct adapter *);
static void	lem_disable_promisc(struct adapter *);
static void	lem_set_multi(struct adapter *);
static void	lem_update_link_status(struct adapter *);
static int	lem_get_buf(struct adapter *, int);
static void	lem_register_vlan(void *, struct ifnet *, u16);
static void	lem_unregister_vlan(void *, struct ifnet *, u16);
static void	lem_setup_vlan_hw_support(struct adapter *);
static int	lem_xmit(struct adapter *, struct mbuf **);
static void	lem_smartspeed(struct adapter *);
static int	lem_82547_fifo_workaround(struct adapter *, int);
static void	lem_82547_update_fifo_head(struct adapter *, int);
static int	lem_82547_tx_fifo_reset(struct adapter *);
static void	lem_82547_move_tail(void *);
static int	lem_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	lem_dma_free(struct adapter *, struct em_dma_alloc *);
static int	lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void	lem_print_nvm_info(struct adapter *);
static int	lem_is_valid_ether_addr(u8 *);
static u32	lem_fill_descriptors (bus_addr_t address, u32 length,
		    PDESC_ARRAY desc_array);
static int	lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	lem_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
static void	lem_set_flow_cntrl(struct adapter *, const char *,
		    const char *, int *, int);
/* Management and WOL Support */
static void	lem_init_manageability(struct adapter *);
static void	lem_release_manageability(struct adapter *);
static void	lem_get_hw_control(struct adapter *);
static void	lem_release_hw_control(struct adapter *);
static void	lem_get_wakeup(device_t);
static void	lem_enable_wakeup(device_t);
static int	lem_enable_phy_wakeup(struct adapter *);
static void	lem_led_func(void *, int);

static void	lem_intr(void *);
static int	lem_irq_fast(void *);
static void	lem_handle_rxtx(void *context, int pending);
static void	lem_handle_link(void *context, int pending);
static void	lem_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);

#ifdef DEVICE_POLLING
static poll_handler_t lem_poll;
#endif /* POLLING */

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t lem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, lem_probe),
	DEVMETHOD(device_attach, lem_attach),
	DEVMETHOD(device_detach, lem_detach),
	DEVMETHOD(device_shutdown, lem_shutdown),
	DEVMETHOD(device_suspend, lem_suspend),
	DEVMETHOD(device_resume, lem_resume),
	DEVMETHOD_END
};

static driver_t lem_driver = {
	/* Shares the "em" name with the modern em(4) driver. */
	"em", lem_methods, sizeof(struct adapter),
};

extern devclass_t em_devclass;
DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
MODULE_DEPEND(lem, pci, 1, 1, 1);
MODULE_DEPEND(lem, ether, 1, 1, 1);

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Convert between the hardware's 1.024 usec timer ticks and microseconds. */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	(1000000000/(MAX_INTS_PER_SEC * 256))

static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int lem_rxd = EM_DEFAULT_RXD;
static int lem_txd = EM_DEFAULT_TXD;
static int lem_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int lem_debug_sbp = FALSE;

TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &lem_rxd);
TUNABLE_INT("hw.em.txd", &lem_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);

/* Interrupt style - default to fast */
static int lem_use_legacy_irq = 0;
TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);

/* How many packets rxeof tries to clean at a time */
static int lem_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);

/* Flow control setting - default to FULL */
static int
lem_fc_setting = e1000_fc_full; 318TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting); 319 320/* Global used in WOL setup with multiport cards */ 321static int global_quad_port_a = 0; 322 323#ifdef DEV_NETMAP /* see ixgbe.c for details */ 324#include <dev/netmap/if_lem_netmap.h> 325#endif /* DEV_NETMAP */ 326 327/********************************************************************* 328 * Device identification routine 329 * 330 * em_probe determines if the driver should be loaded on 331 * adapter based on PCI vendor/device id of the adapter. 332 * 333 * return BUS_PROBE_DEFAULT on success, positive on failure 334 *********************************************************************/ 335 336static int 337lem_probe(device_t dev) 338{ 339 char adapter_name[60]; 340 u16 pci_vendor_id = 0; 341 u16 pci_device_id = 0; 342 u16 pci_subvendor_id = 0; 343 u16 pci_subdevice_id = 0; 344 em_vendor_info_t *ent; 345 346 INIT_DEBUGOUT("em_probe: begin"); 347 348 pci_vendor_id = pci_get_vendor(dev); 349 if (pci_vendor_id != EM_VENDOR_ID) 350 return (ENXIO); 351 352 pci_device_id = pci_get_device(dev); 353 pci_subvendor_id = pci_get_subvendor(dev); 354 pci_subdevice_id = pci_get_subdevice(dev); 355 356 ent = lem_vendor_info_array; 357 while (ent->vendor_id != 0) { 358 if ((pci_vendor_id == ent->vendor_id) && 359 (pci_device_id == ent->device_id) && 360 361 ((pci_subvendor_id == ent->subvendor_id) || 362 (ent->subvendor_id == PCI_ANY_ID)) && 363 364 ((pci_subdevice_id == ent->subdevice_id) || 365 (ent->subdevice_id == PCI_ANY_ID))) { 366 sprintf(adapter_name, "%s %s", 367 lem_strings[ent->index], 368 lem_driver_version); 369 device_set_desc_copy(dev, adapter_name); 370 return (BUS_PROBE_DEFAULT); 371 } 372 ent++; 373 } 374 375 return (ENXIO); 376} 377 378/********************************************************************* 379 * Device initialization routine 380 * 381 * The attach entry point is called when the driver is being loaded. 
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
lem_attach(device_t dev)
{
	struct adapter	*adapter;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("lem_attach: begin");

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    lem_sysctl_nvm_info, "I", "NVM Information");

	/* Callouts are tied to their mutex so callout_stop is race-free. */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

	/* Determine hardware and mac info */
	lem_identify_hardware(adapter);

	/* Setup PCI resources */
	if (lem_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Set up some sysctls for the tunable interrupt delays */
	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute delay and ITR registers only exist on 82540 and later. */
	if (adapter->hw.mac.type >= e1000_82540) {
		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_RADV),
		    lem_rx_abs_int_delay_dflt);
		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_TADV),
		    lem_tx_abs_int_delay_dflt);
		lem_add_int_delay_sysctl(adapter, "itr",
		    "interrupt delay limit in usecs/4",
		    &adapter->tx_itr,
		    E1000_REGISTER(&adapter->hw, E1000_ITR),
		    DEFAULT_ITR);
	}

	/* Sysctls for limiting the amount of work done in the taskqueue */
	lem_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    lem_rx_process_limit);

	/* Sysctl for setting the interface flow control */
	lem_set_flow_cntrl(adapter, "flow_control",
	    "flow control setting",
	    &adapter->fc_setting, lem_fc_setting);

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.
	 */
	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
	    (lem_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, lem_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = lem_txd;
	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
	    (lem_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, lem_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = lem_rxd;

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = 2048;

	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.mac.report_tx_early = 1;

	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base =
	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_hw_init;
	}

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_hw_init;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Allocate transmit descriptors and buffers */
	if (lem_allocate_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		error = ENOMEM;
		goto err_tx_struct;
	}

	/* Allocate receive descriptors and buffers */
	if (lem_allocate_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		error = ENOMEM;
		goto err_rx_struct;
	}

	/*
	** Do interrupt configuration
	*/
	error = lem_allocate_irq(adapter);
	if (error)
		goto err_rx_struct;

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	lem_get_wakeup(dev);

	/* Setup OS specific network interface */
	if (lem_setup_interface(dev, adapter) != 0)
		goto err_rx_struct;

	/* Initialize statistics */
	lem_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	lem_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	lem_add_hw_stats(adapter);

	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt)
		lem_get_hw_control(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	adapter->led_dev = led_create(lem_led_func, adapter,
	    device_get_nameunit(dev));

#ifdef DEV_NETMAP
	lem_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("lem_attach: end");

	return (0);

	/*
	 * Error unwind: each label releases everything acquired after the
	 * corresponding failure point; fall-through order mirrors the
	 * reverse of the acquisition order above.
	 *
	 * NOTE(review): receive structures allocated by
	 * lem_allocate_receive_structures() are not freed on the
	 * err_rx_struct path (only transmit structures are) — possible
	 * leak if lem_allocate_irq()/lem_setup_interface() fail; verify.
	 */
err_rx_struct:
	lem_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
	lem_release_hw_control(adapter);
	lem_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	lem_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	lem_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
lem_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	/* in_detach makes lem_ioctl() a no-op from here on. */
	adapter->in_detach = 1;
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	/* Drain (not just stop) so no callout is still running after this. */
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */
	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	lem_release_hw_control(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
lem_shutdown(device_t dev)
{
	return lem_suspend(dev);
}

/*
 * Suspend/resume device methods.
 */
static int
lem_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	EM_CORE_LOCK(adapter);

	lem_release_manageability(adapter);
	lem_release_hw_control(adapter);
	lem_enable_wakeup(dev);

	EM_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}

static int
lem_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	EM_CORE_LOCK(adapter);
	lem_init_locked(adapter);
	lem_init_manageability(adapter);
	EM_CORE_UNLOCK(adapter);
	/* Kick the transmit path in case packets queued while suspended. */
	lem_start(ifp);

	return bus_generic_resume(dev);
}


/*
 * Dequeue packets from the interface send queue and hand them to the
 * hardware via lem_xmit().  Caller must hold the TX lock.
 */
static void
lem_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		lem_txeof(adapter);
		/* Now do we at least have a minimal? */
		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return;
		}
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (lem_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_check = TRUE;
		adapter->watchdog_time = ticks;
	}
	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return;
}

/* Stack-facing if_start entry: take the TX lock and run the locked path. */
static void
lem_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	EM_TX_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	bool		avoid_reset = FALSE;
	int		error = 0;

	/* Refuse new work once detach has started. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* 82542 cannot do jumbo frames; everything else can. */
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* Only reprogram promisc/allmulti changes. */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			if (adapter->hw.mac.type == e1000_82542 &&
			    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command,
data); 1068 break; 1069 } 1070 1071 return (error); 1072} 1073 1074 1075/********************************************************************* 1076 * Init entry point 1077 * 1078 * This routine is used in two ways. It is used by the stack as 1079 * init entry point in network interface structure. It is also used 1080 * by the driver as a hw/sw initialization routine to get to a 1081 * consistent state. 1082 * 1083 * return 0 on success, positive on failure 1084 **********************************************************************/ 1085 1086static void 1087lem_init_locked(struct adapter *adapter) 1088{ 1089 struct ifnet *ifp = adapter->ifp; 1090 device_t dev = adapter->dev; 1091 u32 pba; 1092 1093 INIT_DEBUGOUT("lem_init: begin"); 1094 1095 EM_CORE_LOCK_ASSERT(adapter); 1096 1097 EM_TX_LOCK(adapter); 1098 lem_stop(adapter); 1099 EM_TX_UNLOCK(adapter); 1100 1101 /* 1102 * Packet Buffer Allocation (PBA) 1103 * Writing PBA sets the receive portion of the buffer 1104 * the remainder is used for the transmit buffer. 1105 * 1106 * Devices before the 82547 had a Packet Buffer of 64K. 1107 * Default allocation: PBA=48K for Rx, leaving 16K for Tx. 1108 * After the 82547 the buffer was reduced to 40K. 1109 * Default allocation: PBA=30K for Rx, leaving 10K for Tx. 1110 * Note: default does not leave enough room for Jumbo Frame >10k. 1111 */ 1112 switch (adapter->hw.mac.type) { 1113 case e1000_82547: 1114 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */ 1115 if (adapter->max_frame_size > 8192) 1116 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 1117 else 1118 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1119 adapter->tx_fifo_head = 0; 1120 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT; 1121 adapter->tx_fifo_size = 1122 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT; 1123 break; 1124 default: 1125 /* Devices before 82547 had a Packet Buffer of 64K. 
*/ 1126 if (adapter->max_frame_size > 8192) 1127 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1128 else 1129 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1130 } 1131 1132 INIT_DEBUGOUT1("lem_init: pba=%dK",pba); 1133 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); 1134 1135 /* Get the latest mac address, User can use a LAA */ 1136 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, 1137 ETHER_ADDR_LEN); 1138 1139 /* Put the address into the Receive Address Array */ 1140 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 1141 1142 /* Initialize the hardware */ 1143 if (lem_hardware_init(adapter)) { 1144 device_printf(dev, "Unable to initialize the hardware\n"); 1145 return; 1146 } 1147 lem_update_link_status(adapter); 1148 1149 /* Setup VLAN support, basic and offload if available */ 1150 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); 1151 1152 /* Set hardware offload abilities */ 1153 ifp->if_hwassist = 0; 1154 if (adapter->hw.mac.type >= e1000_82543) { 1155 if (ifp->if_capenable & IFCAP_TXCSUM) 1156 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 1157 } 1158 1159 /* Configure for OS presence */ 1160 lem_init_manageability(adapter); 1161 1162 /* Prepare transmit descriptors and buffers */ 1163 lem_setup_transmit_structures(adapter); 1164 lem_initialize_transmit_unit(adapter); 1165 1166 /* Setup Multicast table */ 1167 lem_set_multi(adapter); 1168 1169 /* Prepare receive descriptors and buffers */ 1170 if (lem_setup_receive_structures(adapter)) { 1171 device_printf(dev, "Could not setup receive structures\n"); 1172 EM_TX_LOCK(adapter); 1173 lem_stop(adapter); 1174 EM_TX_UNLOCK(adapter); 1175 return; 1176 } 1177 lem_initialize_receive_unit(adapter); 1178 1179 /* Use real VLAN Filter support? 
*/ 1180 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1181 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 1182 /* Use real VLAN Filter support */ 1183 lem_setup_vlan_hw_support(adapter); 1184 else { 1185 u32 ctrl; 1186 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); 1187 ctrl |= E1000_CTRL_VME; 1188 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); 1189 } 1190 } 1191 1192 /* Don't lose promiscuous settings */ 1193 lem_set_promisc(adapter); 1194 1195 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1196 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1197 1198 callout_reset(&adapter->timer, hz, lem_local_timer, adapter); 1199 e1000_clear_hw_cntrs_base_generic(&adapter->hw); 1200 1201#ifdef DEVICE_POLLING 1202 /* 1203 * Only enable interrupts if we are not polling, make sure 1204 * they are off otherwise. 1205 */ 1206 if (ifp->if_capenable & IFCAP_POLLING) 1207 lem_disable_intr(adapter); 1208 else 1209#endif /* DEVICE_POLLING */ 1210 lem_enable_intr(adapter); 1211 1212 /* AMT based hardware can now take control from firmware */ 1213 if (adapter->has_manage && adapter->has_amt) 1214 lem_get_hw_control(adapter); 1215} 1216 1217static void 1218lem_init(void *arg) 1219{ 1220 struct adapter *adapter = arg; 1221 1222 EM_CORE_LOCK(adapter); 1223 lem_init_locked(adapter); 1224 EM_CORE_UNLOCK(adapter); 1225} 1226 1227 1228#ifdef DEVICE_POLLING 1229/********************************************************************* 1230 * 1231 * Legacy polling routine 1232 * 1233 *********************************************************************/ 1234static int 1235lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1236{ 1237 struct adapter *adapter = ifp->if_softc; 1238 u32 reg_icr, rx_done = 0; 1239 1240 EM_CORE_LOCK(adapter); 1241 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1242 EM_CORE_UNLOCK(adapter); 1243 return (rx_done); 1244 } 1245 1246 if (cmd == POLL_AND_CHECK_STATUS) { 1247 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); 1248 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 
1249 callout_stop(&adapter->timer); 1250 adapter->hw.mac.get_link_status = 1; 1251 lem_update_link_status(adapter); 1252 callout_reset(&adapter->timer, hz, 1253 lem_local_timer, adapter); 1254 } 1255 } 1256 EM_CORE_UNLOCK(adapter); 1257 1258 lem_rxeof(adapter, count, &rx_done); 1259 1260 EM_TX_LOCK(adapter); 1261 lem_txeof(adapter); 1262 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1263 lem_start_locked(ifp); 1264 EM_TX_UNLOCK(adapter); 1265 return (rx_done); 1266} 1267#endif /* DEVICE_POLLING */ 1268 1269/********************************************************************* 1270 * 1271 * Legacy Interrupt Service routine 1272 * 1273 *********************************************************************/ 1274static void 1275lem_intr(void *arg) 1276{ 1277 struct adapter *adapter = arg; 1278 struct ifnet *ifp = adapter->ifp; 1279 u32 reg_icr; 1280 1281 1282 if ((ifp->if_capenable & IFCAP_POLLING) || 1283 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)) 1284 return; 1285 1286 EM_CORE_LOCK(adapter); 1287 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); 1288 if (reg_icr & E1000_ICR_RXO) 1289 adapter->rx_overruns++; 1290 1291 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) { 1292 EM_CORE_UNLOCK(adapter); 1293 return; 1294 } 1295 1296 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1297 callout_stop(&adapter->timer); 1298 adapter->hw.mac.get_link_status = 1; 1299 lem_update_link_status(adapter); 1300 /* Deal with TX cruft when link lost */ 1301 lem_tx_purge(adapter); 1302 callout_reset(&adapter->timer, hz, 1303 lem_local_timer, adapter); 1304 EM_CORE_UNLOCK(adapter); 1305 return; 1306 } 1307 1308 EM_CORE_UNLOCK(adapter); 1309 lem_rxeof(adapter, -1, NULL); 1310 1311 EM_TX_LOCK(adapter); 1312 lem_txeof(adapter); 1313 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1314 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1315 lem_start_locked(ifp); 1316 EM_TX_UNLOCK(adapter); 1317 return; 1318} 1319 1320 1321static void 1322lem_handle_link(void *context, int pending) 1323{ 1324 struct adapter *adapter = 
context; 1325 struct ifnet *ifp = adapter->ifp; 1326 1327 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1328 return; 1329 1330 EM_CORE_LOCK(adapter); 1331 callout_stop(&adapter->timer); 1332 lem_update_link_status(adapter); 1333 /* Deal with TX cruft when link lost */ 1334 lem_tx_purge(adapter); 1335 callout_reset(&adapter->timer, hz, lem_local_timer, adapter); 1336 EM_CORE_UNLOCK(adapter); 1337} 1338 1339 1340/* Combined RX/TX handler, used by Legacy and MSI */ 1341static void 1342lem_handle_rxtx(void *context, int pending) 1343{ 1344 struct adapter *adapter = context; 1345 struct ifnet *ifp = adapter->ifp; 1346 1347 1348 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1349 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL); 1350 EM_TX_LOCK(adapter); 1351 lem_txeof(adapter); 1352 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1353 lem_start_locked(ifp); 1354 EM_TX_UNLOCK(adapter); 1355 if (more) { 1356 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); 1357 return; 1358 } 1359 } 1360 1361 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1362 lem_enable_intr(adapter); 1363} 1364 1365/********************************************************************* 1366 * 1367 * Fast Legacy/MSI Combined Interrupt Service routine 1368 * 1369 *********************************************************************/ 1370static int 1371lem_irq_fast(void *arg) 1372{ 1373 struct adapter *adapter = arg; 1374 struct ifnet *ifp; 1375 u32 reg_icr; 1376 1377 ifp = adapter->ifp; 1378 1379 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); 1380 1381 /* Hot eject? */ 1382 if (reg_icr == 0xffffffff) 1383 return FILTER_STRAY; 1384 1385 /* Definitely not our interrupt. */ 1386 if (reg_icr == 0x0) 1387 return FILTER_STRAY; 1388 1389 /* 1390 * Mask interrupts until the taskqueue is finished running. This is 1391 * cheap, just assume that it is needed. This also works around the 1392 * MSI message reordering errata on certain systems. 
1393 */ 1394 lem_disable_intr(adapter); 1395 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); 1396 1397 /* Link status change */ 1398 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1399 adapter->hw.mac.get_link_status = 1; 1400 taskqueue_enqueue(taskqueue_fast, &adapter->link_task); 1401 } 1402 1403 if (reg_icr & E1000_ICR_RXO) 1404 adapter->rx_overruns++; 1405 return FILTER_HANDLED; 1406} 1407 1408 1409/********************************************************************* 1410 * 1411 * Media Ioctl callback 1412 * 1413 * This routine is called whenever the user queries the status of 1414 * the interface using ifconfig. 1415 * 1416 **********************************************************************/ 1417static void 1418lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1419{ 1420 struct adapter *adapter = ifp->if_softc; 1421 u_char fiber_type = IFM_1000_SX; 1422 1423 INIT_DEBUGOUT("lem_media_status: begin"); 1424 1425 EM_CORE_LOCK(adapter); 1426 lem_update_link_status(adapter); 1427 1428 ifmr->ifm_status = IFM_AVALID; 1429 ifmr->ifm_active = IFM_ETHER; 1430 1431 if (!adapter->link_active) { 1432 EM_CORE_UNLOCK(adapter); 1433 return; 1434 } 1435 1436 ifmr->ifm_status |= IFM_ACTIVE; 1437 1438 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || 1439 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { 1440 if (adapter->hw.mac.type == e1000_82545) 1441 fiber_type = IFM_1000_LX; 1442 ifmr->ifm_active |= fiber_type | IFM_FDX; 1443 } else { 1444 switch (adapter->link_speed) { 1445 case 10: 1446 ifmr->ifm_active |= IFM_10_T; 1447 break; 1448 case 100: 1449 ifmr->ifm_active |= IFM_100_TX; 1450 break; 1451 case 1000: 1452 ifmr->ifm_active |= IFM_1000_T; 1453 break; 1454 } 1455 if (adapter->link_duplex == FULL_DUPLEX) 1456 ifmr->ifm_active |= IFM_FDX; 1457 else 1458 ifmr->ifm_active |= IFM_HDX; 1459 } 1460 EM_CORE_UNLOCK(adapter); 1461} 1462 1463/********************************************************************* 1464 * 1465 * 
 Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediopt option with ifconfig.
 *
 **********************************************************************/
static int
lem_media_change(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifmedia  *ifm = &adapter->media;

	INIT_DEBUGOUT("lem_media_change: begin");

	/* Only Ethernet media is meaningful for this driver. */
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	EM_CORE_LOCK(adapter);
	/*
	 * Translate the requested media subtype into the shared-code
	 * autoneg/forced-speed fields; the settings only take effect
	 * via the lem_init_locked() reinit below.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit is always autonegotiated, full duplex only. */
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		/* 10/100 may be forced; duplex comes from the media word. */
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	default:
		/*
		 * NOTE(review): unsupported subtypes still fall through to
		 * the reinit below and return 0 — matches historical driver
		 * behavior, so left unchanged.
		 */
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	/* Reinit so the new link parameters are programmed into the PHY/MAC. */
	lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			error, nsegs, i, j, first, last = 0;

	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

	/*
	** When doing checksum offload, it is critical to
	** make sure the first mbuf has more than header,
	** because that routine expects data to be present.
	*/
	if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
	    (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
		/* On pullup failure the chain is freed and *m_headp is NULL. */
		m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
		*m_headp = m_head;
		if (m_head == NULL)
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/* ENOMEM and friends: chain left intact so caller may retry. */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/* Keep two descriptors spare so head never catches tail. */
	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
	}
	/* m_defrag() may have replaced the chain; reload the head pointer. */
	m_head = *m_headp;

	/* Do hardware assists */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
		lem_transmit_checksum_setup(adapter, m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	/* Remember the starting slot so the 82544 path can roll back. */
	if (adapter->pcix_82544)
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = lem_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* Out of slots: undo and bail. */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			/* Common path: one descriptor per DMA segment. */
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(
			    adapter->txd_cmd | txd_lower | seg_len);
			ctxd->upper.data =
			    htole32(txd_upper);
			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;

	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/*
	 * The mbuf is owned by the LAST descriptor's buffer; swap maps so
	 * the loaded map travels with it and the first slot keeps a spare.
	 */
	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;
	adapter->watchdog_time = ticks;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		/* 82547 half-duplex: defer the tail bump to the workaround. */
		lem_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			lem_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

	return (0);
}

/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;
	
	/*
	 * Walk descriptors from the hardware tail to the software tail,
	 * releasing one whole packet (up to EOP) at a time — but only
	 * if the FIFO workaround says it will not wrap the FIFO boundary.
	 */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (lem_82547_fifo_workaround(adapter, length)) {
				/* FIFO busy: retry from a 1-tick callout. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
				    lem_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			lem_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}	
}

/*
 * Decide whether the next packet of 'len' bytes may be released to the
 * 82547 Tx FIFO.  Returns 1 when the caller must defer (FIFO would be
 * overrun and could not be reset), 0 when it is safe to proceed.
 */
static int
lem_82547_fifo_workaround(struct adapter *adapter, int len)
{	
	int fifo_space, fifo_pkt_len;

	/* FIFO accounting is done in EM_FIFO_HDR-sized units. */
	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (lem_82547_tx_fifo_reset(adapter))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}

/* Advance the software copy of the Tx FIFO head past one packet. */
static void
lem_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
	
	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	}
}


/*
 * Reset the 82547 internal Tx FIFO pointers, but only when the whole
 * transmit path is quiescent (ring empty and FIFO drained).
 * Returns TRUE on reset, FALSE if the FIFO was still busy.
 */
static int
lem_82547_tx_fifo_reset(struct adapter *adapter)
{
	u32 tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	}
	else {
		return (FALSE);
	}
}

/*
 * Program RCTL to honor the interface's IFF_PROMISC / IFF_ALLMULTI flags.
 * Note this only ever *enables* modes; clearing is done separately by
 * lem_disable_promisc().
 */
static void
lem_set_promisc(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		/* Promiscuous implies accepting all multicast too. */
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (lem_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	}
}

/*
 * Clear promiscuous-related RCTL bits.  Multicast-promiscuous (MPE) is
 * only cleared when the multicast filter can actually hold all joined
 * groups; otherwise MPE must stay on for the interface to keep working.
 */
static void
lem_disable_promisc(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_rctl;
	int		mcnt = 0;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	reg_rctl &=  (~E1000_RCTL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		struct  ifmultiaddr *ifma;
		/* Count AF_LINK multicast memberships, capped at the max. */
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else   
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else   
		if_maddr_runlock(ifp);
#endif
	}
	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &=  (~E1000_RCTL_MPE);
	reg_rctl &=  (~E1000_RCTL_SBP);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/

static void
lem_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("lem_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * 82542 rev 2 errata: the receiver must be held in reset (and MWI
	 * disabled) while the multicast table is rewritten; undone below.
	 */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Gather the joined link-layer multicast addresses into mta[]. */
#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		/* Filter overflow: fall back to multicast-promiscuous. */
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* 82542 rev 2 errata: take the receiver back out of reset. */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
lem_local_timer(void *arg)
{
	struct adapter	*adapter = arg;

	EM_CORE_LOCK_ASSERT(adapter);

	lem_update_link_status(adapter);
	lem_update_stats_counters(adapter);

	lem_smartspeed(adapter);

	/*
	 * We check the watchdog: the time since
	 * the last TX descriptor was cleaned.
	 * This implies a functional TX engine.
	 */
	if ((adapter->watchdog_check == TRUE) &&
	    (ticks - adapter->watchdog_time > EM_WATCHDOG))
		goto hung;

	/* Re-arm for one second from now; not re-armed on the hung path
	 * because lem_init_locked() restarts the callout itself. */
	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	return;
hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	lem_init_locked(adapter);
}

/*
 * Refresh cached link state from the PHY/MAC and, on a transition,
 * update if_baudrate and notify the stack via if_link_state_change().
 * Also arms/disarms the TX watchdog around link loss.
 */
static void
lem_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 *  This routine should always be called with BOTH the CORE
 *  and TX locks.
2082 **********************************************************************/ 2083 2084static void 2085lem_stop(void *arg) 2086{ 2087 struct adapter *adapter = arg; 2088 struct ifnet *ifp = adapter->ifp; 2089 2090 EM_CORE_LOCK_ASSERT(adapter); 2091 EM_TX_LOCK_ASSERT(adapter); 2092 2093 INIT_DEBUGOUT("lem_stop: begin"); 2094 2095 lem_disable_intr(adapter); 2096 callout_stop(&adapter->timer); 2097 callout_stop(&adapter->tx_fifo_timer); 2098 2099 /* Tell the stack that the interface is no longer active */ 2100 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2101 2102 e1000_reset_hw(&adapter->hw); 2103 if (adapter->hw.mac.type >= e1000_82544) 2104 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2105 2106 e1000_led_off(&adapter->hw); 2107 e1000_cleanup_led(&adapter->hw); 2108} 2109 2110 2111/********************************************************************* 2112 * 2113 * Determine hardware revision. 2114 * 2115 **********************************************************************/ 2116static void 2117lem_identify_hardware(struct adapter *adapter) 2118{ 2119 device_t dev = adapter->dev; 2120 2121 /* Make sure our PCI config space has the necessary stuff set */
|
2132 2133 /* Save off the information about this board */ 2134 adapter->hw.vendor_id = pci_get_vendor(dev); 2135 adapter->hw.device_id = pci_get_device(dev); 2136 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 2137 adapter->hw.subsystem_vendor_id = 2138 pci_read_config(dev, PCIR_SUBVEND_0, 2); 2139 adapter->hw.subsystem_device_id = 2140 pci_read_config(dev, PCIR_SUBDEV_0, 2); 2141 2142 /* Do Shared Code Init and Setup */ 2143 if (e1000_set_mac_type(&adapter->hw)) { 2144 device_printf(dev, "Setup init failure\n"); 2145 return; 2146 } 2147} 2148 2149static int 2150lem_allocate_pci_resources(struct adapter *adapter) 2151{ 2152 device_t dev = adapter->dev; 2153 int val, rid, error = E1000_SUCCESS; 2154 2155 rid = PCIR_BAR(0); 2156 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2157 &rid, RF_ACTIVE); 2158 if (adapter->memory == NULL) { 2159 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2160 return (ENXIO); 2161 } 2162 adapter->osdep.mem_bus_space_tag = 2163 rman_get_bustag(adapter->memory); 2164 adapter->osdep.mem_bus_space_handle = 2165 rman_get_bushandle(adapter->memory); 2166 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; 2167 2168 /* Only older adapters use IO mapping */ 2169 if (adapter->hw.mac.type > e1000_82543) { 2170 /* Figure our where our IO BAR is ? 
*/ 2171 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) { 2172 val = pci_read_config(dev, rid, 4); 2173 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) { 2174 adapter->io_rid = rid; 2175 break; 2176 } 2177 rid += 4; 2178 /* check for 64bit BAR */ 2179 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT) 2180 rid += 4; 2181 } 2182 if (rid >= PCIR_CIS) { 2183 device_printf(dev, "Unable to locate IO BAR\n"); 2184 return (ENXIO); 2185 } 2186 adapter->ioport = bus_alloc_resource_any(dev, 2187 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE); 2188 if (adapter->ioport == NULL) { 2189 device_printf(dev, "Unable to allocate bus resource: " 2190 "ioport\n"); 2191 return (ENXIO); 2192 } 2193 adapter->hw.io_base = 0; 2194 adapter->osdep.io_bus_space_tag = 2195 rman_get_bustag(adapter->ioport); 2196 adapter->osdep.io_bus_space_handle = 2197 rman_get_bushandle(adapter->ioport); 2198 } 2199 2200 adapter->hw.back = &adapter->osdep; 2201 2202 return (error); 2203} 2204 2205/********************************************************************* 2206 * 2207 * Setup the Legacy or MSI Interrupt handler 2208 * 2209 **********************************************************************/ 2210int 2211lem_allocate_irq(struct adapter *adapter) 2212{ 2213 device_t dev = adapter->dev; 2214 int error, rid = 0; 2215 2216 /* Manually turn off all interrupts */ 2217 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); 2218 2219 /* We allocate a single interrupt resource */ 2220 adapter->res[0] = bus_alloc_resource_any(dev, 2221 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2222 if (adapter->res[0] == NULL) { 2223 device_printf(dev, "Unable to allocate bus resource: " 2224 "interrupt\n"); 2225 return (ENXIO); 2226 } 2227 2228 /* Do Legacy setup? 
*/ 2229 if (lem_use_legacy_irq) { 2230 if ((error = bus_setup_intr(dev, adapter->res[0], 2231 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter, 2232 &adapter->tag[0])) != 0) { 2233 device_printf(dev, 2234 "Failed to register interrupt handler"); 2235 return (error); 2236 } 2237 return (0); 2238 } 2239 2240 /* 2241 * Use a Fast interrupt and the associated 2242 * deferred processing contexts. 2243 */ 2244 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter); 2245 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter); 2246 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT, 2247 taskqueue_thread_enqueue, &adapter->tq); 2248 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq", 2249 device_get_nameunit(adapter->dev)); 2250 if ((error = bus_setup_intr(dev, adapter->res[0], 2251 INTR_TYPE_NET, lem_irq_fast, NULL, adapter, 2252 &adapter->tag[0])) != 0) { 2253 device_printf(dev, "Failed to register fast interrupt " 2254 "handler: %d\n", error); 2255 taskqueue_free(adapter->tq); 2256 adapter->tq = NULL; 2257 return (error); 2258 } 2259 2260 return (0); 2261} 2262 2263 2264static void 2265lem_free_pci_resources(struct adapter *adapter) 2266{ 2267 device_t dev = adapter->dev; 2268 2269 2270 if (adapter->tag[0] != NULL) { 2271 bus_teardown_intr(dev, adapter->res[0], 2272 adapter->tag[0]); 2273 adapter->tag[0] = NULL; 2274 } 2275 2276 if (adapter->res[0] != NULL) { 2277 bus_release_resource(dev, SYS_RES_IRQ, 2278 0, adapter->res[0]); 2279 } 2280 2281 if (adapter->memory != NULL) 2282 bus_release_resource(dev, SYS_RES_MEMORY, 2283 PCIR_BAR(0), adapter->memory); 2284 2285 if (adapter->ioport != NULL) 2286 bus_release_resource(dev, SYS_RES_IOPORT, 2287 adapter->io_rid, adapter->ioport); 2288} 2289 2290 2291/********************************************************************* 2292 * 2293 * Initialize the hardware to a configuration 2294 * as specified by the adapter structure. 
2295 * 2296 **********************************************************************/ 2297static int 2298lem_hardware_init(struct adapter *adapter) 2299{ 2300 device_t dev = adapter->dev; 2301 u16 rx_buffer_size; 2302 2303 INIT_DEBUGOUT("lem_hardware_init: begin"); 2304 2305 /* Issue a global reset */ 2306 e1000_reset_hw(&adapter->hw); 2307 2308 /* When hardware is reset, fifo_head is also reset */ 2309 adapter->tx_fifo_head = 0; 2310 2311 /* 2312 * These parameters control the automatic generation (Tx) and 2313 * response (Rx) to Ethernet PAUSE frames. 2314 * - High water mark should allow for at least two frames to be 2315 * received after sending an XOFF. 2316 * - Low water mark works best when it is very near the high water mark. 2317 * This allows the receiver to restart by sending XON when it has 2318 * drained a bit. Here we use an arbitary value of 1500 which will 2319 * restart after one full frame is pulled from the buffer. There 2320 * could be several smaller frames in the buffer and if so they will 2321 * not trigger the XON until their total number reduces the buffer 2322 * by 1500. 2323 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
2324 */ 2325 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 2326 0xffff) << 10 ); 2327 2328 adapter->hw.fc.high_water = rx_buffer_size - 2329 roundup2(adapter->max_frame_size, 1024); 2330 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500; 2331 2332 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME; 2333 adapter->hw.fc.send_xon = TRUE; 2334 2335 /* Set Flow control, use the tunable location if sane */ 2336 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4)) 2337 adapter->hw.fc.requested_mode = lem_fc_setting; 2338 else 2339 adapter->hw.fc.requested_mode = e1000_fc_none; 2340 2341 if (e1000_init_hw(&adapter->hw) < 0) { 2342 device_printf(dev, "Hardware Initialization Failed\n"); 2343 return (EIO); 2344 } 2345 2346 e1000_check_for_link(&adapter->hw); 2347 2348 return (0); 2349} 2350 2351/********************************************************************* 2352 * 2353 * Setup networking device structure and register an interface. 2354 * 2355 **********************************************************************/ 2356static int 2357lem_setup_interface(device_t dev, struct adapter *adapter) 2358{ 2359 struct ifnet *ifp; 2360 2361 INIT_DEBUGOUT("lem_setup_interface: begin"); 2362 2363 ifp = adapter->ifp = if_alloc(IFT_ETHER); 2364 if (ifp == NULL) { 2365 device_printf(dev, "can not allocate ifnet structure\n"); 2366 return (-1); 2367 } 2368 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2369 ifp->if_init = lem_init; 2370 ifp->if_softc = adapter; 2371 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2372 ifp->if_ioctl = lem_ioctl; 2373 ifp->if_start = lem_start; 2374 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); 2375 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; 2376 IFQ_SET_READY(&ifp->if_snd); 2377 2378 ether_ifattach(ifp, adapter->hw.mac.addr); 2379 2380 ifp->if_capabilities = ifp->if_capenable = 0; 2381 2382 if (adapter->hw.mac.type >= e1000_82543) { 2383 ifp->if_capabilities |= IFCAP_HWCSUM | 
IFCAP_VLAN_HWCSUM; 2384 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; 2385 } 2386 2387 /* 2388 * Tell the upper layer(s) we support long frames. 2389 */ 2390 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2391 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2392 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2393 2394 /* 2395 ** Dont turn this on by default, if vlans are 2396 ** created on another pseudo device (eg. lagg) 2397 ** then vlan events are not passed thru, breaking 2398 ** operation, but with HW FILTER off it works. If 2399 ** using vlans directly on the em driver you can 2400 ** enable this and get full hardware tag filtering. 2401 */ 2402 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2403 2404#ifdef DEVICE_POLLING 2405 ifp->if_capabilities |= IFCAP_POLLING; 2406#endif 2407 2408 /* Enable only WOL MAGIC by default */ 2409 if (adapter->wol) { 2410 ifp->if_capabilities |= IFCAP_WOL; 2411 ifp->if_capenable |= IFCAP_WOL_MAGIC; 2412 } 2413 2414 /* 2415 * Specify the media types supported by this adapter and register 2416 * callbacks to update media and link information 2417 */ 2418 ifmedia_init(&adapter->media, IFM_IMASK, 2419 lem_media_change, lem_media_status); 2420 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || 2421 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { 2422 u_char fiber_type = IFM_1000_SX; /* default type */ 2423 2424 if (adapter->hw.mac.type == e1000_82545) 2425 fiber_type = IFM_1000_LX; 2426 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2427 0, NULL); 2428 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); 2429 } else { 2430 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2431 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2432 0, NULL); 2433 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2434 0, NULL); 2435 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2436 0, NULL); 2437 if 
(adapter->hw.phy.type != e1000_phy_ife) { 2438 ifmedia_add(&adapter->media, 2439 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2440 ifmedia_add(&adapter->media, 2441 IFM_ETHER | IFM_1000_T, 0, NULL); 2442 } 2443 } 2444 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2445 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2446 return (0); 2447} 2448 2449 2450/********************************************************************* 2451 * 2452 * Workaround for SmartSpeed on 82541 and 82547 controllers 2453 * 2454 **********************************************************************/ 2455static void 2456lem_smartspeed(struct adapter *adapter) 2457{ 2458 u16 phy_tmp; 2459 2460 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) || 2461 adapter->hw.mac.autoneg == 0 || 2462 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2463 return; 2464 2465 if (adapter->smartspeed == 0) { 2466 /* If Master/Slave config fault is asserted twice, 2467 * we assume back-to-back */ 2468 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2469 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2470 return; 2471 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2472 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2473 e1000_read_phy_reg(&adapter->hw, 2474 PHY_1000T_CTRL, &phy_tmp); 2475 if(phy_tmp & CR_1000T_MS_ENABLE) { 2476 phy_tmp &= ~CR_1000T_MS_ENABLE; 2477 e1000_write_phy_reg(&adapter->hw, 2478 PHY_1000T_CTRL, phy_tmp); 2479 adapter->smartspeed++; 2480 if(adapter->hw.mac.autoneg && 2481 !e1000_copper_link_autoneg(&adapter->hw) && 2482 !e1000_read_phy_reg(&adapter->hw, 2483 PHY_CONTROL, &phy_tmp)) { 2484 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2485 MII_CR_RESTART_AUTO_NEG); 2486 e1000_write_phy_reg(&adapter->hw, 2487 PHY_CONTROL, phy_tmp); 2488 } 2489 } 2490 } 2491 return; 2492 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2493 /* If still no link, perhaps using 2/3 pair cable */ 2494 e1000_read_phy_reg(&adapter->hw, 
PHY_1000T_CTRL, &phy_tmp); 2495 phy_tmp |= CR_1000T_MS_ENABLE; 2496 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2497 if(adapter->hw.mac.autoneg && 2498 !e1000_copper_link_autoneg(&adapter->hw) && 2499 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { 2500 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2501 MII_CR_RESTART_AUTO_NEG); 2502 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); 2503 } 2504 } 2505 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2506 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2507 adapter->smartspeed = 0; 2508} 2509 2510 2511/* 2512 * Manage DMA'able memory. 2513 */ 2514static void 2515lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2516{ 2517 if (error) 2518 return; 2519 *(bus_addr_t *) arg = segs[0].ds_addr; 2520} 2521 2522static int 2523lem_dma_malloc(struct adapter *adapter, bus_size_t size, 2524 struct em_dma_alloc *dma, int mapflags) 2525{ 2526 int error; 2527 2528 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ 2529 EM_DBA_ALIGN, 0, /* alignment, bounds */ 2530 BUS_SPACE_MAXADDR, /* lowaddr */ 2531 BUS_SPACE_MAXADDR, /* highaddr */ 2532 NULL, NULL, /* filter, filterarg */ 2533 size, /* maxsize */ 2534 1, /* nsegments */ 2535 size, /* maxsegsize */ 2536 0, /* flags */ 2537 NULL, /* lockfunc */ 2538 NULL, /* lockarg */ 2539 &dma->dma_tag); 2540 if (error) { 2541 device_printf(adapter->dev, 2542 "%s: bus_dma_tag_create failed: %d\n", 2543 __func__, error); 2544 goto fail_0; 2545 } 2546 2547 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 2548 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); 2549 if (error) { 2550 device_printf(adapter->dev, 2551 "%s: bus_dmamem_alloc(%ju) failed: %d\n", 2552 __func__, (uintmax_t)size, error); 2553 goto fail_2; 2554 } 2555 2556 dma->dma_paddr = 0; 2557 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 2558 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); 2559 if (error || 
dma->dma_paddr == 0) { 2560 device_printf(adapter->dev, 2561 "%s: bus_dmamap_load failed: %d\n", 2562 __func__, error); 2563 goto fail_3; 2564 } 2565 2566 return (0); 2567 2568fail_3: 2569 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2570fail_2: 2571 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2572 bus_dma_tag_destroy(dma->dma_tag); 2573fail_0: 2574 dma->dma_map = NULL; 2575 dma->dma_tag = NULL; 2576 2577 return (error); 2578} 2579 2580static void 2581lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2582{ 2583 if (dma->dma_tag == NULL) 2584 return; 2585 if (dma->dma_map != NULL) { 2586 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 2587 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2588 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2589 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2590 dma->dma_map = NULL; 2591 } 2592 bus_dma_tag_destroy(dma->dma_tag); 2593 dma->dma_tag = NULL; 2594} 2595 2596 2597/********************************************************************* 2598 * 2599 * Allocate memory for tx_buffer structures. The tx_buffer stores all 2600 * the information needed to transmit a packet on the wire. 
2601 * 2602 **********************************************************************/ 2603static int 2604lem_allocate_transmit_structures(struct adapter *adapter) 2605{ 2606 device_t dev = adapter->dev; 2607 struct em_buffer *tx_buffer; 2608 int error; 2609 2610 /* 2611 * Create DMA tags for tx descriptors 2612 */ 2613 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 2614 1, 0, /* alignment, bounds */ 2615 BUS_SPACE_MAXADDR, /* lowaddr */ 2616 BUS_SPACE_MAXADDR, /* highaddr */ 2617 NULL, NULL, /* filter, filterarg */ 2618 MCLBYTES * EM_MAX_SCATTER, /* maxsize */ 2619 EM_MAX_SCATTER, /* nsegments */ 2620 MCLBYTES, /* maxsegsize */ 2621 0, /* flags */ 2622 NULL, /* lockfunc */ 2623 NULL, /* lockarg */ 2624 &adapter->txtag)) != 0) { 2625 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2626 goto fail; 2627 } 2628 2629 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) * 2630 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); 2631 if (adapter->tx_buffer_area == NULL) { 2632 device_printf(dev, "Unable to allocate tx_buffer memory\n"); 2633 error = ENOMEM; 2634 goto fail; 2635 } 2636 2637 /* Create the descriptor buffer dma maps */ 2638 for (int i = 0; i < adapter->num_tx_desc; i++) { 2639 tx_buffer = &adapter->tx_buffer_area[i]; 2640 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map); 2641 if (error != 0) { 2642 device_printf(dev, "Unable to create TX DMA map\n"); 2643 goto fail; 2644 } 2645 tx_buffer->next_eop = -1; 2646 } 2647 2648 return (0); 2649fail: 2650 lem_free_transmit_structures(adapter); 2651 return (error); 2652} 2653 2654/********************************************************************* 2655 * 2656 * (Re)Initialize transmit structures. 
2657 * 2658 **********************************************************************/ 2659static void 2660lem_setup_transmit_structures(struct adapter *adapter) 2661{ 2662 struct em_buffer *tx_buffer; 2663#ifdef DEV_NETMAP 2664 /* we are already locked */ 2665 struct netmap_adapter *na = NA(adapter->ifp); 2666 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); 2667#endif /* DEV_NETMAP */ 2668 2669 /* Clear the old ring contents */ 2670 bzero(adapter->tx_desc_base, 2671 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); 2672 2673 /* Free any existing TX buffers */ 2674 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { 2675 tx_buffer = &adapter->tx_buffer_area[i]; 2676 bus_dmamap_sync(adapter->txtag, tx_buffer->map, 2677 BUS_DMASYNC_POSTWRITE); 2678 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2679 m_freem(tx_buffer->m_head); 2680 tx_buffer->m_head = NULL; 2681#ifdef DEV_NETMAP 2682 if (slot) { 2683 /* the i-th NIC entry goes to slot si */ 2684 int si = netmap_idx_n2k(&na->tx_rings[0], i); 2685 uint64_t paddr; 2686 void *addr; 2687 2688 addr = PNMB(slot + si, &paddr); 2689 adapter->tx_desc_base[si].buffer_addr = htole64(paddr); 2690 /* reload the map for netmap mode */ 2691 netmap_load_map(adapter->txtag, tx_buffer->map, addr); 2692 } 2693#endif /* DEV_NETMAP */ 2694 tx_buffer->next_eop = -1; 2695 } 2696 2697 /* Reset state */ 2698 adapter->last_hw_offload = 0; 2699 adapter->next_avail_tx_desc = 0; 2700 adapter->next_tx_to_clean = 0; 2701 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2702 2703 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2704 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2705 2706 return; 2707} 2708 2709/********************************************************************* 2710 * 2711 * Enable transmit unit. 
2712 * 2713 **********************************************************************/ 2714static void 2715lem_initialize_transmit_unit(struct adapter *adapter) 2716{ 2717 u32 tctl, tipg = 0; 2718 u64 bus_addr; 2719 2720 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin"); 2721 /* Setup the Base and Length of the Tx Descriptor Ring */ 2722 bus_addr = adapter->txdma.dma_paddr; 2723 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), 2724 adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); 2725 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), 2726 (u32)(bus_addr >> 32)); 2727 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), 2728 (u32)bus_addr); 2729 /* Setup the HW Tx Head and Tail descriptor pointers */ 2730 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); 2731 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); 2732 2733 HW_DEBUGOUT2("Base = %x, Length = %x\n", 2734 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)), 2735 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0))); 2736 2737 /* Set the default values for the Tx Inter Packet Gap timer */ 2738 switch (adapter->hw.mac.type) { 2739 case e1000_82542: 2740 tipg = DEFAULT_82542_TIPG_IPGT; 2741 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2742 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2743 break; 2744 default: 2745 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || 2746 (adapter->hw.phy.media_type == 2747 e1000_media_type_internal_serdes)) 2748 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2749 else 2750 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2751 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2752 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2753 } 2754 2755 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); 2756 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value); 2757 if(adapter->hw.mac.type >= e1000_82540) 2758 E1000_WRITE_REG(&adapter->hw, E1000_TADV, 2759 adapter->tx_abs_int_delay.value); 2760 2761 /* Program the Transmit Control Register */ 2762 tctl = 
E1000_READ_REG(&adapter->hw, E1000_TCTL); 2763 tctl &= ~E1000_TCTL_CT; 2764 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2765 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 2766 2767 /* This write will effectively turn on the transmit unit. */ 2768 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); 2769 2770 /* Setup Transmit Descriptor Base Settings */ 2771 adapter->txd_cmd = E1000_TXD_CMD_IFCS; 2772 2773 if (adapter->tx_int_delay.value > 0) 2774 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 2775} 2776 2777/********************************************************************* 2778 * 2779 * Free all transmit related data structures. 2780 * 2781 **********************************************************************/ 2782static void 2783lem_free_transmit_structures(struct adapter *adapter) 2784{ 2785 struct em_buffer *tx_buffer; 2786 2787 INIT_DEBUGOUT("free_transmit_structures: begin"); 2788 2789 if (adapter->tx_buffer_area != NULL) { 2790 for (int i = 0; i < adapter->num_tx_desc; i++) { 2791 tx_buffer = &adapter->tx_buffer_area[i]; 2792 if (tx_buffer->m_head != NULL) { 2793 bus_dmamap_sync(adapter->txtag, tx_buffer->map, 2794 BUS_DMASYNC_POSTWRITE); 2795 bus_dmamap_unload(adapter->txtag, 2796 tx_buffer->map); 2797 m_freem(tx_buffer->m_head); 2798 tx_buffer->m_head = NULL; 2799 } else if (tx_buffer->map != NULL) 2800 bus_dmamap_unload(adapter->txtag, 2801 tx_buffer->map); 2802 if (tx_buffer->map != NULL) { 2803 bus_dmamap_destroy(adapter->txtag, 2804 tx_buffer->map); 2805 tx_buffer->map = NULL; 2806 } 2807 } 2808 } 2809 if (adapter->tx_buffer_area != NULL) { 2810 free(adapter->tx_buffer_area, M_DEVBUF); 2811 adapter->tx_buffer_area = NULL; 2812 } 2813 if (adapter->txtag != NULL) { 2814 bus_dma_tag_destroy(adapter->txtag); 2815 adapter->txtag = NULL; 2816 } 2817#if __FreeBSD_version >= 800000 2818 if (adapter->br != NULL) 2819 buf_ring_free(adapter->br, M_DEVBUF); 2820#endif 2821} 2822 2823/********************************************************************* 
2824 * 2825 * The offload context needs to be set when we transfer the first 2826 * packet of a particular protocol (TCP/UDP). This routine has been 2827 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete) 2828 * 2829 * Added back the old method of keeping the current context type 2830 * and not setting if unnecessary, as this is reported to be a 2831 * big performance win. -jfv 2832 **********************************************************************/ 2833static void 2834lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp, 2835 u32 *txd_upper, u32 *txd_lower) 2836{ 2837 struct e1000_context_desc *TXD = NULL; 2838 struct em_buffer *tx_buffer; 2839 struct ether_vlan_header *eh; 2840 struct ip *ip = NULL; 2841 struct ip6_hdr *ip6; 2842 int curr_txd, ehdrlen; 2843 u32 cmd, hdr_len, ip_hlen; 2844 u16 etype; 2845 u8 ipproto; 2846 2847 2848 cmd = hdr_len = ipproto = 0; 2849 *txd_upper = *txd_lower = 0; 2850 curr_txd = adapter->next_avail_tx_desc; 2851 2852 /* 2853 * Determine where frame payload starts. 2854 * Jump over vlan headers if already present, 2855 * helpful for QinQ too. 2856 */ 2857 eh = mtod(mp, struct ether_vlan_header *); 2858 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2859 etype = ntohs(eh->evl_proto); 2860 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2861 } else { 2862 etype = ntohs(eh->evl_encap_proto); 2863 ehdrlen = ETHER_HDR_LEN; 2864 } 2865 2866 /* 2867 * We only support TCP/UDP for IPv4 and IPv6 for the moment. 2868 * TODO: Support SCTP too when it hits the tree. 2869 */ 2870 switch (etype) { 2871 case ETHERTYPE_IP: 2872 ip = (struct ip *)(mp->m_data + ehdrlen); 2873 ip_hlen = ip->ip_hl << 2; 2874 2875 /* Setup of IP header checksum. */ 2876 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 2877 /* 2878 * Start offset for header checksum calculation. 2879 * End offset for header checksum calculation. 2880 * Offset of place to put the checksum. 
2881 */ 2882 TXD = (struct e1000_context_desc *) 2883 &adapter->tx_desc_base[curr_txd]; 2884 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2885 TXD->lower_setup.ip_fields.ipcse = 2886 htole16(ehdrlen + ip_hlen); 2887 TXD->lower_setup.ip_fields.ipcso = 2888 ehdrlen + offsetof(struct ip, ip_sum); 2889 cmd |= E1000_TXD_CMD_IP; 2890 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2891 } 2892 2893 hdr_len = ehdrlen + ip_hlen; 2894 ipproto = ip->ip_p; 2895 2896 break; 2897 case ETHERTYPE_IPV6: 2898 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 2899 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */ 2900 2901 /* IPv6 doesn't have a header checksum. */ 2902 2903 hdr_len = ehdrlen + ip_hlen; 2904 ipproto = ip6->ip6_nxt; 2905 break; 2906 2907 default: 2908 return; 2909 } 2910 2911 switch (ipproto) { 2912 case IPPROTO_TCP: 2913 if (mp->m_pkthdr.csum_flags & CSUM_TCP) { 2914 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2915 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2916 /* no need for context if already set */ 2917 if (adapter->last_hw_offload == CSUM_TCP) 2918 return; 2919 adapter->last_hw_offload = CSUM_TCP; 2920 /* 2921 * Start offset for payload checksum calculation. 2922 * End offset for payload checksum calculation. 2923 * Offset of place to put the checksum. 
2924 */ 2925 TXD = (struct e1000_context_desc *) 2926 &adapter->tx_desc_base[curr_txd]; 2927 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2928 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2929 TXD->upper_setup.tcp_fields.tucso = 2930 hdr_len + offsetof(struct tcphdr, th_sum); 2931 cmd |= E1000_TXD_CMD_TCP; 2932 } 2933 break; 2934 case IPPROTO_UDP: 2935 { 2936 if (mp->m_pkthdr.csum_flags & CSUM_UDP) { 2937 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2938 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2939 /* no need for context if already set */ 2940 if (adapter->last_hw_offload == CSUM_UDP) 2941 return; 2942 adapter->last_hw_offload = CSUM_UDP; 2943 /* 2944 * Start offset for header checksum calculation. 2945 * End offset for header checksum calculation. 2946 * Offset of place to put the checksum. 2947 */ 2948 TXD = (struct e1000_context_desc *) 2949 &adapter->tx_desc_base[curr_txd]; 2950 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2951 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2952 TXD->upper_setup.tcp_fields.tucso = 2953 hdr_len + offsetof(struct udphdr, uh_sum); 2954 } 2955 /* Fall Thru */ 2956 } 2957 default: 2958 break; 2959 } 2960 2961 if (TXD == NULL) 2962 return; 2963 TXD->tcp_seg_setup.data = htole32(0); 2964 TXD->cmd_and_length = 2965 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd); 2966 tx_buffer = &adapter->tx_buffer_area[curr_txd]; 2967 tx_buffer->m_head = NULL; 2968 tx_buffer->next_eop = -1; 2969 2970 if (++curr_txd == adapter->num_tx_desc) 2971 curr_txd = 0; 2972 2973 adapter->num_tx_desc_avail--; 2974 adapter->next_avail_tx_desc = curr_txd; 2975} 2976 2977 2978/********************************************************************** 2979 * 2980 * Examine each tx_buffer in the used queue. If the hardware is done 2981 * processing the packet then free associated resources. The 2982 * tx_buffer is put back on the free queue. 
2983 * 2984 **********************************************************************/ 2985static void 2986lem_txeof(struct adapter *adapter) 2987{ 2988 int first, last, done, num_avail; 2989 struct em_buffer *tx_buffer; 2990 struct e1000_tx_desc *tx_desc, *eop_desc; 2991 struct ifnet *ifp = adapter->ifp; 2992 2993 EM_TX_LOCK_ASSERT(adapter); 2994 2995#ifdef DEV_NETMAP 2996 if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT))) 2997 return; 2998#endif /* DEV_NETMAP */ 2999 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3000 return; 3001 3002 num_avail = adapter->num_tx_desc_avail; 3003 first = adapter->next_tx_to_clean; 3004 tx_desc = &adapter->tx_desc_base[first]; 3005 tx_buffer = &adapter->tx_buffer_area[first]; 3006 last = tx_buffer->next_eop; 3007 eop_desc = &adapter->tx_desc_base[last]; 3008 3009 /* 3010 * What this does is get the index of the 3011 * first descriptor AFTER the EOP of the 3012 * first packet, that way we can do the 3013 * simple comparison on the inner while loop. 
3014 */ 3015 if (++last == adapter->num_tx_desc) 3016 last = 0; 3017 done = last; 3018 3019 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 3020 BUS_DMASYNC_POSTREAD); 3021 3022 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) { 3023 /* We clean the range of the packet */ 3024 while (first != done) { 3025 tx_desc->upper.data = 0; 3026 tx_desc->lower.data = 0; 3027 tx_desc->buffer_addr = 0; 3028 ++num_avail; 3029 3030 if (tx_buffer->m_head) { 3031 ifp->if_opackets++; 3032 bus_dmamap_sync(adapter->txtag, 3033 tx_buffer->map, 3034 BUS_DMASYNC_POSTWRITE); 3035 bus_dmamap_unload(adapter->txtag, 3036 tx_buffer->map); 3037 3038 m_freem(tx_buffer->m_head); 3039 tx_buffer->m_head = NULL; 3040 } 3041 tx_buffer->next_eop = -1; 3042 adapter->watchdog_time = ticks; 3043 3044 if (++first == adapter->num_tx_desc) 3045 first = 0; 3046 3047 tx_buffer = &adapter->tx_buffer_area[first]; 3048 tx_desc = &adapter->tx_desc_base[first]; 3049 } 3050 /* See if we can continue to the next packet */ 3051 last = tx_buffer->next_eop; 3052 if (last != -1) { 3053 eop_desc = &adapter->tx_desc_base[last]; 3054 /* Get new done point */ 3055 if (++last == adapter->num_tx_desc) last = 0; 3056 done = last; 3057 } else 3058 break; 3059 } 3060 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 3061 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3062 3063 adapter->next_tx_to_clean = first; 3064 adapter->num_tx_desc_avail = num_avail; 3065 3066 /* 3067 * If we have enough room, clear IFF_DRV_OACTIVE to 3068 * tell the stack that it is OK to send packets. 3069 * If there are no pending descriptors, clear the watchdog. 
	 */
	if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* All descriptors reclaimed: cancel the watchdog timer. */
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
			return;
		}
	}
}

/*********************************************************************
 *
 *  When Link is lost sometimes there is work still in the TX ring
 *  which may result in a watchdog, rather than allow that we do an
 *  attempted cleanup and then reinit here. Note that this has been
 *  seen mostly with fiber adapters.
 *
 **********************************************************************/
static void
lem_tx_purge(struct adapter *adapter)
{
	if ((!adapter->link_active) && (adapter->watchdog_check)) {
		EM_TX_LOCK(adapter);
		lem_txeof(adapter);
		EM_TX_UNLOCK(adapter);
		if (adapter->watchdog_check) /* Still outstanding? */
			lem_init_locked(adapter);
	}
}

/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
static int
lem_get_buf(struct adapter *adapter, int i)
{
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Shift payload so the IP header lands 4-byte aligned. */
	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_free(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	/* Swap the just-loaded spare map with the slot's old map. */
	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
	rx_buffer->m_head = m;

	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
lem_allocate_receive_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->rx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		return (ENOMEM);
	}

	/* One DMA tag for all RX buffers: single cluster-sized segment. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &adapter->rxtag);
	if (error) {
		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create the spare map (used by getbuf) */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
	    &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, error);
		goto fail;
	}

	/* One map per receive descriptor slot. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
			    __func__, error);
			goto fail;
		}
	}

	return (0);

fail:
	/* Tears down whatever was created above; tolerates partial setup. */
	lem_free_receive_structures(adapter);
	return (error);
}

/*********************************************************************
 *
 *  (Re)initialize receive structures.
3222 * 3223 **********************************************************************/ 3224static int 3225lem_setup_receive_structures(struct adapter *adapter) 3226{ 3227 struct em_buffer *rx_buffer; 3228 int i, error; 3229#ifdef DEV_NETMAP 3230 /* we are already under lock */ 3231 struct netmap_adapter *na = NA(adapter->ifp); 3232 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); 3233#endif 3234 3235 /* Reset descriptor ring */ 3236 bzero(adapter->rx_desc_base, 3237 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc); 3238 3239 /* Free current RX buffers. */ 3240 rx_buffer = adapter->rx_buffer_area; 3241 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 3242 if (rx_buffer->m_head != NULL) { 3243 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, 3244 BUS_DMASYNC_POSTREAD); 3245 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 3246 m_freem(rx_buffer->m_head); 3247 rx_buffer->m_head = NULL; 3248 } 3249 } 3250 3251 /* Allocate new ones. */ 3252 for (i = 0; i < adapter->num_rx_desc; i++) { 3253#ifdef DEV_NETMAP 3254 if (slot) { 3255 /* the i-th NIC entry goes to slot si */ 3256 int si = netmap_idx_n2k(&na->rx_rings[0], i); 3257 uint64_t paddr; 3258 void *addr; 3259 3260 addr = PNMB(slot + si, &paddr); 3261 netmap_load_map(adapter->rxtag, rx_buffer->map, addr); 3262 /* Update descriptor */ 3263 adapter->rx_desc_base[i].buffer_addr = htole64(paddr); 3264 continue; 3265 } 3266#endif /* DEV_NETMAP */ 3267 error = lem_get_buf(adapter, i); 3268 if (error) 3269 return (error); 3270 } 3271 3272 /* Setup our descriptor pointers */ 3273 adapter->next_rx_desc_to_check = 0; 3274 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, 3275 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3276 3277 return (0); 3278} 3279 3280/********************************************************************* 3281 * 3282 * Enable receive unit. 
 *
 **********************************************************************/

static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* 82543 TBI workaround may require accepting bad packets */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes > 2K use the extended-size (BSEX) encoding. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	rctl = adapter->num_rx_desc - 1; /* default RDT value */
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP)
		rctl -= NA(adapter->ifp)->rx_rings[0].nr_hwavail;
#endif /* DEV_NETMAP */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);

	return;
}

/*********************************************************************
 *
 *  Free receive related data structures.
3389 * 3390 **********************************************************************/ 3391static void 3392lem_free_receive_structures(struct adapter *adapter) 3393{ 3394 struct em_buffer *rx_buffer; 3395 int i; 3396 3397 INIT_DEBUGOUT("free_receive_structures: begin"); 3398 3399 if (adapter->rx_sparemap) { 3400 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap); 3401 adapter->rx_sparemap = NULL; 3402 } 3403 3404 /* Cleanup any existing buffers */ 3405 if (adapter->rx_buffer_area != NULL) { 3406 rx_buffer = adapter->rx_buffer_area; 3407 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 3408 if (rx_buffer->m_head != NULL) { 3409 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, 3410 BUS_DMASYNC_POSTREAD); 3411 bus_dmamap_unload(adapter->rxtag, 3412 rx_buffer->map); 3413 m_freem(rx_buffer->m_head); 3414 rx_buffer->m_head = NULL; 3415 } else if (rx_buffer->map != NULL) 3416 bus_dmamap_unload(adapter->rxtag, 3417 rx_buffer->map); 3418 if (rx_buffer->map != NULL) { 3419 bus_dmamap_destroy(adapter->rxtag, 3420 rx_buffer->map); 3421 rx_buffer->map = NULL; 3422 } 3423 } 3424 } 3425 3426 if (adapter->rx_buffer_area != NULL) { 3427 free(adapter->rx_buffer_area, M_DEVBUF); 3428 adapter->rx_buffer_area = NULL; 3429 } 3430 3431 if (adapter->rxtag != NULL) { 3432 bus_dma_tag_destroy(adapter->rxtag); 3433 adapter->rxtag = NULL; 3434 } 3435} 3436 3437/********************************************************************* 3438 * 3439 * This routine executes in interrupt context. It replenishes 3440 * the mbufs in the descriptor and sends data which has been 3441 * dma'ed into host memory to upper layer. 3442 * 3443 * We loop at most count times if count is > 0, or until done if 3444 * count < 0. 
 *
 *  For polling we also now return the number of cleaned packets
 *********************************************************************/
static bool
lem_rxeof(struct adapter *adapter, int count, int *done)
{
	struct ifnet	*ifp = adapter->ifp;
	struct mbuf	*mp;
	u8		status = 0, accept_frame = 0, eop = 0;
	u16 		len, desc_len, prev_len_adj;
	int		i, rx_sent = 0;
	struct e1000_rx_desc   *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

#ifdef DEV_NETMAP
	/* netmap takes over the ring; it also handles the unlock. */
	if (netmap_rx_irq(ifp, 0 | NETMAP_LOCKED_ENTER, &rx_sent))
		return (FALSE);
#endif /* DEV_NETMAP */

	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		if (done != NULL)
			*done = rx_sent;
		EM_RX_UNLOCK(adapter);
		return (FALSE);
	}

	while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		struct mbuf *m = NULL;

		status = current_desc->status;
		if ((status & E1000_RXD_STAT_DD) == 0)
			break;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (status & E1000_RXD_STAT_EOP) {
			/* Only EOP descriptors count against the budget. */
			count--;
			eop = 1;
			if (desc_len < ETHER_CRC_LEN) {
				/* CRC straddled descriptors; trim the
				 * shortfall from the previous fragment. */
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u8	last_byte;
			u32	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/* 82543 TBI workaround: a "bad" frame whose only
			 * defect is the carrier-extension byte is accepted. */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			if (lem_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				lem_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    lem_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    le16toh(current_desc->special);
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Call into the stack */
		if (m != NULL) {
			/* Drop the lock across if_input; re-read the ring
			 * index afterwards since it may have moved. */
			adapter->next_rx_desc_to_check = i;
			EM_RX_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_RX_LOCK(adapter);
			rx_sent++;
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	if (done != NULL)
		*done = rx_sent;
	EM_RX_UNLOCK(adapter);
	return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign entire payload on
 * architectures with strict alignment. This is serious design mistake of 8254x
 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
 * payload. On architectures without strict alignment restrictions 8254x still
 * performs unaligned memory access which would reduce the performance too.
 * To avoid copying over an entire frame to align, we allocate a new mbuf and
 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
 * existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
 * not used at all on architectures with strict alignment.
 */
static int
lem_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Room in the cluster: slide data up by ETHER_HDR_LEN. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		/* No room: prepend a fresh mbuf carrying the header. */
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
#endif

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
lem_receive_checksum(struct adapter *adapter,
    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
{
	/* 82543 or newer only */
	if ((adapter->hw.mac.type < e1000_82543) ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}

	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

/*
 * This routine is run via an vlan
 * config EVENT
 */
static void
lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
		return;

	EM_CORE_LOCK(adapter);
	/* VFTA is a 128-entry table of 32-bit words: word index, bit. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}

/*
 * This routine is run via an vlan
 * unconfig EVENT
 */
static void
lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))       /* Invalid */
		return;

	EM_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}

static void
lem_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32             reg;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < EM_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
			    i, adapter->shadow_vfta[i]);

	/* Enable VLAN tag stripping (VME). */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

/* Unmask the standard interrupt causes. */
static void
lem_enable_intr(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ims_mask = IMS_ENABLE_MASK;

	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
}

/* Mask every interrupt cause. */
static void
lem_disable_intr(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features
 */
static void
lem_init_manageability(struct adapter *adapter)
{
	/* A shared code workaround */
	if (adapter->has_manage) {
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
lem_release_manageability(struct adapter *adapter)
{
	if (adapter->has_manage) {
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}

/*
 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
lem_get_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext;

	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	return;
}

/*
 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
lem_release_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext;

	if (!adapter->has_manage)
		return;

	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	return;
}

/*
 * Reject multicast/broadcast (low bit of first octet set) and the
 * all-zero address; everything else is considered valid.
 */
static int
lem_is_valid_ether_addr(u8 *addr)
{
	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
		return (FALSE);
	}

	return (TRUE);
}

/*
** Parse the interface capabilities with regard
** to both system management and wake-on-lan for
** later use.
*/
static void
lem_get_wakeup(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	u16		eeprom_data = 0, device_id, apme_mask;

	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
	apme_mask = EM_EEPROM_APME;

	/* Pick the NVM word holding the APM-enable bit for this MAC. */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
		apme_mask = EM_82544_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* Dual-port parts keep per-port settings. */
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		} else
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	return;
}


/*
 * Enable PCI Wake On Lan
 capability
 */
static void
lem_enable_wakeup(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;
	u32		pmc, ctrl, ctrl_ext, rctl;
	u16     	status;

	/* No PCI power-management capability: nothing to program. */
	if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
		return;

	/* Advertise the wakeup capability */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

	/* Keep the laser running on Fiber adapters */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	/*
	** Determine type of Wakeup: note that wol
	** is set with all bits on by default.
	*/
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;

	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		/* Multicast wake requires promiscuous multicast RX. */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	if (adapter->hw.mac.type == e1000_pchlan) {
		if (lem_enable_phy_wakeup(adapter))
			return;
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}


	/* Request PME */
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}

/*
** WOL in the newer chipset interfaces (pchlan)
** require thing to be copied into the phy
*/
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/* copy MAC RARs to PHY RARs */
	for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		mreg = E1000_READ_REG(hw, E1000_RAL(i));
		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_M(i),
		    (u16)((mreg >> 16) & 0xFFFF));
		mreg = E1000_READ_REG(hw, E1000_RAH(i));
		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) &
		    0xFFFF));
	}

	/* configure PHY Rx Control register */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
		    << BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	/* Select wakeup-control page (769) via raw MDIC accesses. */
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	    (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return ret;
}

/* led(4) callback: drive the adapter LED on/off under the core lock. */
static void
lem_led_func(void *arg, int onoff)
{
	struct adapter	*adapter = arg;

	EM_CORE_LOCK(adapter);
	if (onoff) {
		e1000_setup_led(&adapter->hw);
		e1000_led_on(&adapter->hw);
	} else {
		e1000_led_off(&adapter->hw);
		e1000_cleanup_led(&adapter->hw);
	}
	EM_CORE_UNLOCK(adapter);
}

/*********************************************************************
* 82544 Coexistence issue workaround.
*    There are 2 issues.
*       1. Transmit Hang issue.
*    To detect this issue, following equation can be used...
*	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
*	  If SUM[3:0] is in between 1 to 4, we will have this issue.
*
*       2. DAC issue.
*    To detect this issue, following equation can be used...
*	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
*	  If SUM[3:0] is in between 9 to c, we will have this issue.
*
*
*    WORKAROUND:
*	  Make sure we do not have ending address
*	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
*
*************************************************************************/
static u32
lem_fill_descriptors (bus_addr_t address, u32 length,
    PDESC_ARRAY desc_array)
{
	u32 safe_terminator;

	/* Since issue is sensitive to length and address.*/
	/* Let us first check the address...*/
	if (length <= 4) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}
	safe_terminator = (u32)((((u32)address & 0x7) +
	    (length & 0xF)) & 0xF);
	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
	if (safe_terminator == 0 ||
	    (safe_terminator > 4 &&
	    safe_terminator < 9) ||
	    (safe_terminator > 0xC &&
	    safe_terminator <= 0xF)) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}

	/* Split off the last 4 bytes into a second descriptor so the
	 * segment never ends on a problematic address. */
	desc_array->descriptor[0].address = address;
	desc_array->descriptor[0].length = length - 4;
	desc_array->descriptor[1].address = address + (length - 4);
	desc_array->descriptor[1].length = 4;
	desc_array->elements = 2;
	return (desc_array->elements);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
lem_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/* Symbol/sequence error counters are only meaningful with link
	 * (or always on copper media). */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	/* NOTE(review): only the high dwords are accumulated here, unlike
	 * GORC/GOTC above — looks intentional (read clears both) but the
	 * low 32 bits of TOR/TOT are discarded; confirm against datasheet. */
	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); 4264 4265 if (adapter->hw.mac.type >= e1000_82543) { 4266 adapter->stats.algnerrc += 4267 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); 4268 adapter->stats.rxerrc += 4269 E1000_READ_REG(&adapter->hw, E1000_RXERRC); 4270 adapter->stats.tncrs += 4271 E1000_READ_REG(&adapter->hw, E1000_TNCRS); 4272 adapter->stats.cexterr += 4273 E1000_READ_REG(&adapter->hw, E1000_CEXTERR); 4274 adapter->stats.tsctc += 4275 E1000_READ_REG(&adapter->hw, E1000_TSCTC); 4276 adapter->stats.tsctfc += 4277 E1000_READ_REG(&adapter->hw, E1000_TSCTFC); 4278 } 4279 ifp = adapter->ifp; 4280 4281 ifp->if_collisions = adapter->stats.colc; 4282 4283 /* Rx Errors */ 4284 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc + 4285 adapter->stats.crcerrs + adapter->stats.algnerrc + 4286 adapter->stats.ruc + adapter->stats.roc + 4287 adapter->stats.mpc + adapter->stats.cexterr; 4288 4289 /* Tx Errors */ 4290 ifp->if_oerrors = adapter->stats.ecol + 4291 adapter->stats.latecol + adapter->watchdog_events; 4292} 4293 4294/* Export a single 32-bit register via a read-only sysctl. */ 4295static int 4296lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) 4297{ 4298 struct adapter *adapter; 4299 u_int val; 4300 4301 adapter = oidp->oid_arg1; 4302 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); 4303 return (sysctl_handle_int(oidp, &val, 0, req)); 4304} 4305 4306/* 4307 * Add sysctl variables, one per statistic, to the system. 
4308 */ 4309static void 4310lem_add_hw_stats(struct adapter *adapter) 4311{ 4312 device_t dev = adapter->dev; 4313 4314 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4315 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 4316 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 4317 struct e1000_hw_stats *stats = &adapter->stats; 4318 4319 struct sysctl_oid *stat_node; 4320 struct sysctl_oid_list *stat_list; 4321 4322 /* Driver Statistics */ 4323 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", 4324 CTLFLAG_RD, &adapter->mbuf_alloc_failed, 4325 "Std mbuf failed"); 4326 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", 4327 CTLFLAG_RD, &adapter->mbuf_cluster_failed, 4328 "Std mbuf cluster failed"); 4329 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 4330 CTLFLAG_RD, &adapter->dropped_pkts, 4331 "Driver dropped packets"); 4332 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 4333 CTLFLAG_RD, &adapter->no_tx_dma_setup, 4334 "Driver tx dma failure in xmit"); 4335 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1", 4336 CTLFLAG_RD, &adapter->no_tx_desc_avail1, 4337 "Not enough tx descriptors failure in xmit"); 4338 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2", 4339 CTLFLAG_RD, &adapter->no_tx_desc_avail2, 4340 "Not enough tx descriptors failure in xmit"); 4341 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", 4342 CTLFLAG_RD, &adapter->rx_overruns, 4343 "RX overruns"); 4344 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", 4345 CTLFLAG_RD, &adapter->watchdog_events, 4346 "Watchdog timeouts"); 4347 4348 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", 4349 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL, 4350 lem_sysctl_reg_handler, "IU", 4351 "Device Control Register"); 4352 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", 4353 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL, 4354 lem_sysctl_reg_handler, "IU", 4355 "Receiver Control Register"); 4356 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, 
"fc_high_water", 4357 CTLFLAG_RD, &adapter->hw.fc.high_water, 0, 4358 "Flow Control High Watermark"); 4359 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 4360 CTLFLAG_RD, &adapter->hw.fc.low_water, 0, 4361 "Flow Control Low Watermark"); 4362 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround", 4363 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt, 4364 "TX FIFO workaround events"); 4365 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset", 4366 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt, 4367 "TX FIFO resets"); 4368 4369 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head", 4370 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0), 4371 lem_sysctl_reg_handler, "IU", 4372 "Transmit Descriptor Head"); 4373 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail", 4374 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0), 4375 lem_sysctl_reg_handler, "IU", 4376 "Transmit Descriptor Tail"); 4377 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head", 4378 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0), 4379 lem_sysctl_reg_handler, "IU", 4380 "Receive Descriptor Head"); 4381 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail", 4382 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0), 4383 lem_sysctl_reg_handler, "IU", 4384 "Receive Descriptor Tail"); 4385 4386 4387 /* MAC stats get their own sub node */ 4388 4389 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 4390 CTLFLAG_RD, NULL, "Statistics"); 4391 stat_list = SYSCTL_CHILDREN(stat_node); 4392 4393 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", 4394 CTLFLAG_RD, &stats->ecol, 4395 "Excessive collisions"); 4396 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", 4397 CTLFLAG_RD, &stats->scc, 4398 "Single collisions"); 4399 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 4400 CTLFLAG_RD, &stats->mcc, 4401 "Multiple collisions"); 4402 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", 4403 CTLFLAG_RD, &stats->latecol, 4404 "Late collisions"); 4405 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, 
"collision_count", 4406 CTLFLAG_RD, &stats->colc, 4407 "Collision Count"); 4408 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", 4409 CTLFLAG_RD, &adapter->stats.symerrs, 4410 "Symbol Errors"); 4411 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", 4412 CTLFLAG_RD, &adapter->stats.sec, 4413 "Sequence Errors"); 4414 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", 4415 CTLFLAG_RD, &adapter->stats.dc, 4416 "Defer Count"); 4417 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", 4418 CTLFLAG_RD, &adapter->stats.mpc, 4419 "Missed Packets"); 4420 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", 4421 CTLFLAG_RD, &adapter->stats.rnbc, 4422 "Receive No Buffers"); 4423 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", 4424 CTLFLAG_RD, &adapter->stats.ruc, 4425 "Receive Undersize"); 4426 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 4427 CTLFLAG_RD, &adapter->stats.rfc, 4428 "Fragmented Packets Received "); 4429 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", 4430 CTLFLAG_RD, &adapter->stats.roc, 4431 "Oversized Packets Received"); 4432 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", 4433 CTLFLAG_RD, &adapter->stats.rjc, 4434 "Recevied Jabber"); 4435 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", 4436 CTLFLAG_RD, &adapter->stats.rxerrc, 4437 "Receive Errors"); 4438 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 4439 CTLFLAG_RD, &adapter->stats.crcerrs, 4440 "CRC errors"); 4441 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", 4442 CTLFLAG_RD, &adapter->stats.algnerrc, 4443 "Alignment Errors"); 4444 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", 4445 CTLFLAG_RD, &adapter->stats.cexterr, 4446 "Collision/Carrier extension errors"); 4447 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 4448 CTLFLAG_RD, &adapter->stats.xonrxc, 4449 "XON Received"); 4450 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 4451 CTLFLAG_RD, 
&adapter->stats.xontxc, 4452 "XON Transmitted"); 4453 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 4454 CTLFLAG_RD, &adapter->stats.xoffrxc, 4455 "XOFF Received"); 4456 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 4457 CTLFLAG_RD, &adapter->stats.xofftxc, 4458 "XOFF Transmitted"); 4459 4460 /* Packet Reception Stats */ 4461 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", 4462 CTLFLAG_RD, &adapter->stats.tpr, 4463 "Total Packets Received "); 4464 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", 4465 CTLFLAG_RD, &adapter->stats.gprc, 4466 "Good Packets Received"); 4467 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", 4468 CTLFLAG_RD, &adapter->stats.bprc, 4469 "Broadcast Packets Received"); 4470 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", 4471 CTLFLAG_RD, &adapter->stats.mprc, 4472 "Multicast Packets Received"); 4473 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 4474 CTLFLAG_RD, &adapter->stats.prc64, 4475 "64 byte frames received "); 4476 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 4477 CTLFLAG_RD, &adapter->stats.prc127, 4478 "65-127 byte frames received"); 4479 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 4480 CTLFLAG_RD, &adapter->stats.prc255, 4481 "128-255 byte frames received"); 4482 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 4483 CTLFLAG_RD, &adapter->stats.prc511, 4484 "256-511 byte frames received"); 4485 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 4486 CTLFLAG_RD, &adapter->stats.prc1023, 4487 "512-1023 byte frames received"); 4488 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 4489 CTLFLAG_RD, &adapter->stats.prc1522, 4490 "1023-1522 byte frames received"); 4491 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 4492 CTLFLAG_RD, &adapter->stats.gorc, 4493 "Good Octets Received"); 4494 4495 /* Packet Transmission Stats */ 4496 SYSCTL_ADD_UQUAD(ctx, 
stat_list, OID_AUTO, "good_octets_txd", 4497 CTLFLAG_RD, &adapter->stats.gotc, 4498 "Good Octets Transmitted"); 4499 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 4500 CTLFLAG_RD, &adapter->stats.tpt, 4501 "Total Packets Transmitted"); 4502 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 4503 CTLFLAG_RD, &adapter->stats.gptc, 4504 "Good Packets Transmitted"); 4505 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 4506 CTLFLAG_RD, &adapter->stats.bptc, 4507 "Broadcast Packets Transmitted"); 4508 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 4509 CTLFLAG_RD, &adapter->stats.mptc, 4510 "Multicast Packets Transmitted"); 4511 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 4512 CTLFLAG_RD, &adapter->stats.ptc64, 4513 "64 byte frames transmitted "); 4514 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 4515 CTLFLAG_RD, &adapter->stats.ptc127, 4516 "65-127 byte frames transmitted"); 4517 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 4518 CTLFLAG_RD, &adapter->stats.ptc255, 4519 "128-255 byte frames transmitted"); 4520 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 4521 CTLFLAG_RD, &adapter->stats.ptc511, 4522 "256-511 byte frames transmitted"); 4523 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 4524 CTLFLAG_RD, &adapter->stats.ptc1023, 4525 "512-1023 byte frames transmitted"); 4526 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 4527 CTLFLAG_RD, &adapter->stats.ptc1522, 4528 "1024-1522 byte frames transmitted"); 4529 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", 4530 CTLFLAG_RD, &adapter->stats.tsctc, 4531 "TSO Contexts Transmitted"); 4532 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", 4533 CTLFLAG_RD, &adapter->stats.tsctfc, 4534 "TSO Contexts Failed"); 4535} 4536 4537/********************************************************************** 4538 * 4539 * This routine provides a way to dump out the adapter 
eeprom, 4540 * often a useful debug/service tool. This only dumps the first 4541 * 32 words, stuff that matters is in that extent. 4542 * 4543 **********************************************************************/ 4544 4545static int 4546lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) 4547{ 4548 struct adapter *adapter; 4549 int error; 4550 int result; 4551 4552 result = -1; 4553 error = sysctl_handle_int(oidp, &result, 0, req); 4554 4555 if (error || !req->newptr) 4556 return (error); 4557 4558 /* 4559 * This value will cause a hex dump of the 4560 * first 32 16-bit words of the EEPROM to 4561 * the screen. 4562 */ 4563 if (result == 1) { 4564 adapter = (struct adapter *)arg1; 4565 lem_print_nvm_info(adapter); 4566 } 4567 4568 return (error); 4569} 4570 4571static void 4572lem_print_nvm_info(struct adapter *adapter) 4573{ 4574 u16 eeprom_data; 4575 int i, j, row = 0; 4576 4577 /* Its a bit crude, but it gets the job done */ 4578 printf("\nInterface EEPROM Dump:\n"); 4579 printf("Offset\n0x0000 "); 4580 for (i = 0, j = 0; i < 32; i++, j++) { 4581 if (j == 8) { /* Make the offset block */ 4582 j = 0; ++row; 4583 printf("\n0x00%x0 ",row); 4584 } 4585 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); 4586 printf("%04x ", eeprom_data); 4587 } 4588 printf("\n"); 4589} 4590 4591static int 4592lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 4593{ 4594 struct em_int_delay_info *info; 4595 struct adapter *adapter; 4596 u32 regval; 4597 int error; 4598 int usecs; 4599 int ticks; 4600 4601 info = (struct em_int_delay_info *)arg1; 4602 usecs = info->value; 4603 error = sysctl_handle_int(oidp, &usecs, 0, req); 4604 if (error != 0 || req->newptr == NULL) 4605 return (error); 4606 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535)) 4607 return (EINVAL); 4608 info->value = usecs; 4609 ticks = EM_USECS_TO_TICKS(usecs); 4610 if (info->offset == E1000_ITR) /* units are 256ns here */ 4611 ticks *= 4; 4612 4613 adapter = info->adapter; 4614 4615 EM_CORE_LOCK(adapter); 4616 regval = 
E1000_READ_OFFSET(&adapter->hw, info->offset); 4617 regval = (regval & ~0xffff) | (ticks & 0xffff); 4618 /* Handle a few special cases. */ 4619 switch (info->offset) { 4620 case E1000_RDTR: 4621 break; 4622 case E1000_TIDV: 4623 if (ticks == 0) { 4624 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; 4625 /* Don't write 0 into the TIDV register. */ 4626 regval++; 4627 } else 4628 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 4629 break; 4630 } 4631 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); 4632 EM_CORE_UNLOCK(adapter); 4633 return (0); 4634} 4635 4636static void 4637lem_add_int_delay_sysctl(struct adapter *adapter, const char *name, 4638 const char *description, struct em_int_delay_info *info, 4639 int offset, int value) 4640{ 4641 info->adapter = adapter; 4642 info->offset = offset; 4643 info->value = value; 4644 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), 4645 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4646 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, 4647 info, 0, lem_sysctl_int_delay, "I", description); 4648} 4649 4650static void 4651lem_set_flow_cntrl(struct adapter *adapter, const char *name, 4652 const char *description, int *limit, int value) 4653{ 4654 *limit = value; 4655 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 4656 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4657 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); 4658} 4659 4660static void 4661lem_add_rx_process_limit(struct adapter *adapter, const char *name, 4662 const char *description, int *limit, int value) 4663{ 4664 *limit = value; 4665 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 4666 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4667 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); 4668}
| 2124 2125 /* Save off the information about this board */ 2126 adapter->hw.vendor_id = pci_get_vendor(dev); 2127 adapter->hw.device_id = pci_get_device(dev); 2128 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 2129 adapter->hw.subsystem_vendor_id = 2130 pci_read_config(dev, PCIR_SUBVEND_0, 2); 2131 adapter->hw.subsystem_device_id = 2132 pci_read_config(dev, PCIR_SUBDEV_0, 2); 2133 2134 /* Do Shared Code Init and Setup */ 2135 if (e1000_set_mac_type(&adapter->hw)) { 2136 device_printf(dev, "Setup init failure\n"); 2137 return; 2138 } 2139} 2140 2141static int 2142lem_allocate_pci_resources(struct adapter *adapter) 2143{ 2144 device_t dev = adapter->dev; 2145 int val, rid, error = E1000_SUCCESS; 2146 2147 rid = PCIR_BAR(0); 2148 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2149 &rid, RF_ACTIVE); 2150 if (adapter->memory == NULL) { 2151 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2152 return (ENXIO); 2153 } 2154 adapter->osdep.mem_bus_space_tag = 2155 rman_get_bustag(adapter->memory); 2156 adapter->osdep.mem_bus_space_handle = 2157 rman_get_bushandle(adapter->memory); 2158 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; 2159 2160 /* Only older adapters use IO mapping */ 2161 if (adapter->hw.mac.type > e1000_82543) { 2162 /* Figure our where our IO BAR is ? 
*/ 2163 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) { 2164 val = pci_read_config(dev, rid, 4); 2165 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) { 2166 adapter->io_rid = rid; 2167 break; 2168 } 2169 rid += 4; 2170 /* check for 64bit BAR */ 2171 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT) 2172 rid += 4; 2173 } 2174 if (rid >= PCIR_CIS) { 2175 device_printf(dev, "Unable to locate IO BAR\n"); 2176 return (ENXIO); 2177 } 2178 adapter->ioport = bus_alloc_resource_any(dev, 2179 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE); 2180 if (adapter->ioport == NULL) { 2181 device_printf(dev, "Unable to allocate bus resource: " 2182 "ioport\n"); 2183 return (ENXIO); 2184 } 2185 adapter->hw.io_base = 0; 2186 adapter->osdep.io_bus_space_tag = 2187 rman_get_bustag(adapter->ioport); 2188 adapter->osdep.io_bus_space_handle = 2189 rman_get_bushandle(adapter->ioport); 2190 } 2191 2192 adapter->hw.back = &adapter->osdep; 2193 2194 return (error); 2195} 2196 2197/********************************************************************* 2198 * 2199 * Setup the Legacy or MSI Interrupt handler 2200 * 2201 **********************************************************************/ 2202int 2203lem_allocate_irq(struct adapter *adapter) 2204{ 2205 device_t dev = adapter->dev; 2206 int error, rid = 0; 2207 2208 /* Manually turn off all interrupts */ 2209 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); 2210 2211 /* We allocate a single interrupt resource */ 2212 adapter->res[0] = bus_alloc_resource_any(dev, 2213 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 2214 if (adapter->res[0] == NULL) { 2215 device_printf(dev, "Unable to allocate bus resource: " 2216 "interrupt\n"); 2217 return (ENXIO); 2218 } 2219 2220 /* Do Legacy setup? 
*/ 2221 if (lem_use_legacy_irq) { 2222 if ((error = bus_setup_intr(dev, adapter->res[0], 2223 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter, 2224 &adapter->tag[0])) != 0) { 2225 device_printf(dev, 2226 "Failed to register interrupt handler"); 2227 return (error); 2228 } 2229 return (0); 2230 } 2231 2232 /* 2233 * Use a Fast interrupt and the associated 2234 * deferred processing contexts. 2235 */ 2236 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter); 2237 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter); 2238 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT, 2239 taskqueue_thread_enqueue, &adapter->tq); 2240 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq", 2241 device_get_nameunit(adapter->dev)); 2242 if ((error = bus_setup_intr(dev, adapter->res[0], 2243 INTR_TYPE_NET, lem_irq_fast, NULL, adapter, 2244 &adapter->tag[0])) != 0) { 2245 device_printf(dev, "Failed to register fast interrupt " 2246 "handler: %d\n", error); 2247 taskqueue_free(adapter->tq); 2248 adapter->tq = NULL; 2249 return (error); 2250 } 2251 2252 return (0); 2253} 2254 2255 2256static void 2257lem_free_pci_resources(struct adapter *adapter) 2258{ 2259 device_t dev = adapter->dev; 2260 2261 2262 if (adapter->tag[0] != NULL) { 2263 bus_teardown_intr(dev, adapter->res[0], 2264 adapter->tag[0]); 2265 adapter->tag[0] = NULL; 2266 } 2267 2268 if (adapter->res[0] != NULL) { 2269 bus_release_resource(dev, SYS_RES_IRQ, 2270 0, adapter->res[0]); 2271 } 2272 2273 if (adapter->memory != NULL) 2274 bus_release_resource(dev, SYS_RES_MEMORY, 2275 PCIR_BAR(0), adapter->memory); 2276 2277 if (adapter->ioport != NULL) 2278 bus_release_resource(dev, SYS_RES_IOPORT, 2279 adapter->io_rid, adapter->ioport); 2280} 2281 2282 2283/********************************************************************* 2284 * 2285 * Initialize the hardware to a configuration 2286 * as specified by the adapter structure. 
2287 * 2288 **********************************************************************/ 2289static int 2290lem_hardware_init(struct adapter *adapter) 2291{ 2292 device_t dev = adapter->dev; 2293 u16 rx_buffer_size; 2294 2295 INIT_DEBUGOUT("lem_hardware_init: begin"); 2296 2297 /* Issue a global reset */ 2298 e1000_reset_hw(&adapter->hw); 2299 2300 /* When hardware is reset, fifo_head is also reset */ 2301 adapter->tx_fifo_head = 0; 2302 2303 /* 2304 * These parameters control the automatic generation (Tx) and 2305 * response (Rx) to Ethernet PAUSE frames. 2306 * - High water mark should allow for at least two frames to be 2307 * received after sending an XOFF. 2308 * - Low water mark works best when it is very near the high water mark. 2309 * This allows the receiver to restart by sending XON when it has 2310 * drained a bit. Here we use an arbitary value of 1500 which will 2311 * restart after one full frame is pulled from the buffer. There 2312 * could be several smaller frames in the buffer and if so they will 2313 * not trigger the XON until their total number reduces the buffer 2314 * by 1500. 2315 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
2316 */ 2317 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 2318 0xffff) << 10 ); 2319 2320 adapter->hw.fc.high_water = rx_buffer_size - 2321 roundup2(adapter->max_frame_size, 1024); 2322 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500; 2323 2324 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME; 2325 adapter->hw.fc.send_xon = TRUE; 2326 2327 /* Set Flow control, use the tunable location if sane */ 2328 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4)) 2329 adapter->hw.fc.requested_mode = lem_fc_setting; 2330 else 2331 adapter->hw.fc.requested_mode = e1000_fc_none; 2332 2333 if (e1000_init_hw(&adapter->hw) < 0) { 2334 device_printf(dev, "Hardware Initialization Failed\n"); 2335 return (EIO); 2336 } 2337 2338 e1000_check_for_link(&adapter->hw); 2339 2340 return (0); 2341} 2342 2343/********************************************************************* 2344 * 2345 * Setup networking device structure and register an interface. 2346 * 2347 **********************************************************************/ 2348static int 2349lem_setup_interface(device_t dev, struct adapter *adapter) 2350{ 2351 struct ifnet *ifp; 2352 2353 INIT_DEBUGOUT("lem_setup_interface: begin"); 2354 2355 ifp = adapter->ifp = if_alloc(IFT_ETHER); 2356 if (ifp == NULL) { 2357 device_printf(dev, "can not allocate ifnet structure\n"); 2358 return (-1); 2359 } 2360 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2361 ifp->if_init = lem_init; 2362 ifp->if_softc = adapter; 2363 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2364 ifp->if_ioctl = lem_ioctl; 2365 ifp->if_start = lem_start; 2366 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); 2367 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; 2368 IFQ_SET_READY(&ifp->if_snd); 2369 2370 ether_ifattach(ifp, adapter->hw.mac.addr); 2371 2372 ifp->if_capabilities = ifp->if_capenable = 0; 2373 2374 if (adapter->hw.mac.type >= e1000_82543) { 2375 ifp->if_capabilities |= IFCAP_HWCSUM | 
IFCAP_VLAN_HWCSUM; 2376 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; 2377 } 2378 2379 /* 2380 * Tell the upper layer(s) we support long frames. 2381 */ 2382 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2383 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2384 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2385 2386 /* 2387 ** Dont turn this on by default, if vlans are 2388 ** created on another pseudo device (eg. lagg) 2389 ** then vlan events are not passed thru, breaking 2390 ** operation, but with HW FILTER off it works. If 2391 ** using vlans directly on the em driver you can 2392 ** enable this and get full hardware tag filtering. 2393 */ 2394 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2395 2396#ifdef DEVICE_POLLING 2397 ifp->if_capabilities |= IFCAP_POLLING; 2398#endif 2399 2400 /* Enable only WOL MAGIC by default */ 2401 if (adapter->wol) { 2402 ifp->if_capabilities |= IFCAP_WOL; 2403 ifp->if_capenable |= IFCAP_WOL_MAGIC; 2404 } 2405 2406 /* 2407 * Specify the media types supported by this adapter and register 2408 * callbacks to update media and link information 2409 */ 2410 ifmedia_init(&adapter->media, IFM_IMASK, 2411 lem_media_change, lem_media_status); 2412 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || 2413 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { 2414 u_char fiber_type = IFM_1000_SX; /* default type */ 2415 2416 if (adapter->hw.mac.type == e1000_82545) 2417 fiber_type = IFM_1000_LX; 2418 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2419 0, NULL); 2420 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); 2421 } else { 2422 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2423 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2424 0, NULL); 2425 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2426 0, NULL); 2427 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2428 0, NULL); 2429 if 
(adapter->hw.phy.type != e1000_phy_ife) { 2430 ifmedia_add(&adapter->media, 2431 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2432 ifmedia_add(&adapter->media, 2433 IFM_ETHER | IFM_1000_T, 0, NULL); 2434 } 2435 } 2436 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2437 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2438 return (0); 2439} 2440 2441 2442/********************************************************************* 2443 * 2444 * Workaround for SmartSpeed on 82541 and 82547 controllers 2445 * 2446 **********************************************************************/ 2447static void 2448lem_smartspeed(struct adapter *adapter) 2449{ 2450 u16 phy_tmp; 2451 2452 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) || 2453 adapter->hw.mac.autoneg == 0 || 2454 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2455 return; 2456 2457 if (adapter->smartspeed == 0) { 2458 /* If Master/Slave config fault is asserted twice, 2459 * we assume back-to-back */ 2460 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2461 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2462 return; 2463 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2464 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2465 e1000_read_phy_reg(&adapter->hw, 2466 PHY_1000T_CTRL, &phy_tmp); 2467 if(phy_tmp & CR_1000T_MS_ENABLE) { 2468 phy_tmp &= ~CR_1000T_MS_ENABLE; 2469 e1000_write_phy_reg(&adapter->hw, 2470 PHY_1000T_CTRL, phy_tmp); 2471 adapter->smartspeed++; 2472 if(adapter->hw.mac.autoneg && 2473 !e1000_copper_link_autoneg(&adapter->hw) && 2474 !e1000_read_phy_reg(&adapter->hw, 2475 PHY_CONTROL, &phy_tmp)) { 2476 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2477 MII_CR_RESTART_AUTO_NEG); 2478 e1000_write_phy_reg(&adapter->hw, 2479 PHY_CONTROL, phy_tmp); 2480 } 2481 } 2482 } 2483 return; 2484 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2485 /* If still no link, perhaps using 2/3 pair cable */ 2486 e1000_read_phy_reg(&adapter->hw, 
PHY_1000T_CTRL, &phy_tmp); 2487 phy_tmp |= CR_1000T_MS_ENABLE; 2488 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2489 if(adapter->hw.mac.autoneg && 2490 !e1000_copper_link_autoneg(&adapter->hw) && 2491 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { 2492 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2493 MII_CR_RESTART_AUTO_NEG); 2494 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); 2495 } 2496 } 2497 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2498 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2499 adapter->smartspeed = 0; 2500} 2501 2502 2503/* 2504 * Manage DMA'able memory. 2505 */ 2506static void 2507lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2508{ 2509 if (error) 2510 return; 2511 *(bus_addr_t *) arg = segs[0].ds_addr; 2512} 2513 2514static int 2515lem_dma_malloc(struct adapter *adapter, bus_size_t size, 2516 struct em_dma_alloc *dma, int mapflags) 2517{ 2518 int error; 2519 2520 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ 2521 EM_DBA_ALIGN, 0, /* alignment, bounds */ 2522 BUS_SPACE_MAXADDR, /* lowaddr */ 2523 BUS_SPACE_MAXADDR, /* highaddr */ 2524 NULL, NULL, /* filter, filterarg */ 2525 size, /* maxsize */ 2526 1, /* nsegments */ 2527 size, /* maxsegsize */ 2528 0, /* flags */ 2529 NULL, /* lockfunc */ 2530 NULL, /* lockarg */ 2531 &dma->dma_tag); 2532 if (error) { 2533 device_printf(adapter->dev, 2534 "%s: bus_dma_tag_create failed: %d\n", 2535 __func__, error); 2536 goto fail_0; 2537 } 2538 2539 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 2540 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); 2541 if (error) { 2542 device_printf(adapter->dev, 2543 "%s: bus_dmamem_alloc(%ju) failed: %d\n", 2544 __func__, (uintmax_t)size, error); 2545 goto fail_2; 2546 } 2547 2548 dma->dma_paddr = 0; 2549 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 2550 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); 2551 if (error || 
dma->dma_paddr == 0) { 2552 device_printf(adapter->dev, 2553 "%s: bus_dmamap_load failed: %d\n", 2554 __func__, error); 2555 goto fail_3; 2556 } 2557 2558 return (0); 2559 2560fail_3: 2561 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2562fail_2: 2563 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2564 bus_dma_tag_destroy(dma->dma_tag); 2565fail_0: 2566 dma->dma_map = NULL; 2567 dma->dma_tag = NULL; 2568 2569 return (error); 2570} 2571 2572static void 2573lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2574{ 2575 if (dma->dma_tag == NULL) 2576 return; 2577 if (dma->dma_map != NULL) { 2578 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 2579 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2580 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2581 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2582 dma->dma_map = NULL; 2583 } 2584 bus_dma_tag_destroy(dma->dma_tag); 2585 dma->dma_tag = NULL; 2586} 2587 2588 2589/********************************************************************* 2590 * 2591 * Allocate memory for tx_buffer structures. The tx_buffer stores all 2592 * the information needed to transmit a packet on the wire. 
 *
 **********************************************************************/
static int
lem_allocate_transmit_structures(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *tx_buffer;
	int error;

	/*
	 * Create DMA tags for tx descriptors
	 */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
				1, 0,			/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MCLBYTES * EM_MAX_SCATTER,	/* maxsize */
				EM_MAX_SCATTER,		/* nsegments */
				MCLBYTES,		/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&adapter->txtag)) != 0) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* One em_buffer per TX descriptor, zeroed so maps start NULL */
	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->tx_buffer_area == NULL) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	for (int i = 0; i < adapter->num_tx_desc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		/* -1 = no end-of-packet descriptor pending for this slot */
		tx_buffer->next_eop = -1;
	}

	return (0);
fail:
	/* Tears down the tag, buffer area and any maps created so far */
	lem_free_transmit_structures(adapter);
	return (error);
}

/*********************************************************************
 *
 *  (Re)Initialize transmit structures.
 *
 **********************************************************************/
static void
lem_setup_transmit_structures(struct adapter *adapter)
{
	struct em_buffer *tx_buffer;
#ifdef DEV_NETMAP
	/* we are already locked */
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
#endif /* DEV_NETMAP */

	/* Clear the old ring contents */
	bzero(adapter->tx_desc_base,
	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);

	/*
	 * Free any existing TX buffers.
	 * NOTE(review): the tx_buffer++ in the increment clause is
	 * redundant — tx_buffer is reassigned from i at the top of each
	 * iteration — but harmless; left as-is.
	 */
	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		tx_buffer = &adapter->tx_buffer_area[i];
		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
		m_freem(tx_buffer->m_head);
		tx_buffer->m_head = NULL;
#ifdef DEV_NETMAP
		if (slot) {
			/* the i-th NIC entry goes to slot si */
			int si = netmap_idx_n2k(&na->tx_rings[0], i);
			uint64_t paddr;
			void *addr;

			addr = PNMB(slot + si, &paddr);
			adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
			/* reload the map for netmap mode */
			netmap_load_map(adapter->txtag, tx_buffer->map, addr);
		}
#endif /* DEV_NETMAP */
		tx_buffer->next_eop = -1;
	}

	/* Reset state */
	adapter->last_hw_offload = 0;
	adapter->next_avail_tx_desc = 0;
	adapter->next_tx_to_clean = 0;
	adapter->num_tx_desc_avail = adapter->num_tx_desc;

	/* Hand the cleared ring back to the hardware */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return;
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tipg = 0;
	u64	bus_addr;

	INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes and copper need different IPGT defaults */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	/* TADV (absolute TX interrupt delay) exists on 82540 and later */
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Only request a delayed interrupt if an int delay is configured */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
static void
lem_free_transmit_structures(struct adapter *adapter)
{
	struct em_buffer *tx_buffer;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		for (int i = 0; i < adapter->num_tx_desc; i++) {
			tx_buffer = &adapter->tx_buffer_area[i];
			if (tx_buffer->m_head != NULL) {
				/* In-flight mbuf: sync, unload, free it */
				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			} else if (tx_buffer->map != NULL)
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		free(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
#if __FreeBSD_version >= 800000
	if (adapter->br != NULL)
		buf_ring_free(adapter->br, M_DEVBUF);
#endif
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). This routine has been
 *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
 *
 *  Added back the old method of keeping the current context type
 *  and not setting if unnecessary, as this is reported to be a
 *  big performance win.  -jfv
 **********************************************************************/
static void
lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    u32 *txd_upper, u32 *txd_lower)
{
	struct e1000_context_desc *TXD = NULL;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6;
	int curr_txd, ehdrlen;
	u32 cmd, hdr_len, ip_hlen;
	u16 etype;
	u8 ipproto;


	cmd = hdr_len = ipproto = 0;
	*txd_upper = *txd_lower = 0;
	curr_txd = adapter->next_avail_tx_desc;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/*
	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		/* Setup of IP header checksum. */
		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
			TXD->lower_setup.ip_fields.ipcse =
			    htole16(ehdrlen + ip_hlen);
			TXD->lower_setup.ip_fields.ipcso =
			    ehdrlen + offsetof(struct ip, ip_sum);
			cmd |= E1000_TXD_CMD_IP;
			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		}

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip->ip_p;

		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

		/* IPv6 doesn't have a header checksum. */

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip6->ip6_nxt;
		break;

	default:
		/* Unsupported ethertype: no offload context programmed */
		return;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_TCP)
				return;
			adapter->last_hw_offload = CSUM_TCP;
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
		}
		break;
	case IPPROTO_UDP:
	{
		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_UDP)
				return;
			adapter->last_hw_offload = CSUM_UDP;
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct udphdr, uh_sum);
		}
		/* Fall Thru */
	}
	default:
		break;
	}

	/* If no context descriptor was filled in, nothing to publish */
	if (TXD == NULL)
		return;
	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
	/* The context descriptor consumes one ring slot with no mbuf */
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}


/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static void
lem_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct ifnet *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

#ifdef DEV_NETMAP
	/* In netmap mode the ring is owned by netmap; nothing to clean */
	if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
		return;
#endif /* DEV_NETMAP */
	/* Ring already empty */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* Outer loop: one iteration per completed packet */
	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++num_avail;

			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;
			/* Forward progress: push the watchdog deadline out */
			adapter->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to
	 * tell the stack that it is OK to send packets.
	 * If there are no pending descriptors, clear the watchdog.
	 */
	if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
			return;
		}
	}
}

/*********************************************************************
 *
 *  When Link is lost sometimes there is work still in the TX ring
 *  which may result in a watchdog, rather than allow that we do an
 *  attempted cleanup and then reinit here. Note that this has been
 *  seens mostly with fiber adapters.
 *
 **********************************************************************/
static void
lem_tx_purge(struct adapter *adapter)
{
	if ((!adapter->link_active) && (adapter->watchdog_check)) {
		EM_TX_LOCK(adapter);
		lem_txeof(adapter);
		EM_TX_UNLOCK(adapter);
		if (adapter->watchdog_check) /* Still outstanding? */
			lem_init_locked(adapter);
	}
}

/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
static int
lem_get_buf(struct adapter *adapter, int i)
{
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct em_buffer	*rx_buffer;
	int			error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Leave room so the IP header lands 4-byte aligned */
	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_free(m);
		return (error);
	}

	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	/* Swap the freshly-loaded spare map into the ring slot */
	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
	rx_buffer->m_head = m;

	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
3150 * 3151 **********************************************************************/ 3152static int 3153lem_allocate_receive_structures(struct adapter *adapter) 3154{ 3155 device_t dev = adapter->dev; 3156 struct em_buffer *rx_buffer; 3157 int i, error; 3158 3159 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) * 3160 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); 3161 if (adapter->rx_buffer_area == NULL) { 3162 device_printf(dev, "Unable to allocate rx_buffer memory\n"); 3163 return (ENOMEM); 3164 } 3165 3166 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 3167 1, 0, /* alignment, bounds */ 3168 BUS_SPACE_MAXADDR, /* lowaddr */ 3169 BUS_SPACE_MAXADDR, /* highaddr */ 3170 NULL, NULL, /* filter, filterarg */ 3171 MCLBYTES, /* maxsize */ 3172 1, /* nsegments */ 3173 MCLBYTES, /* maxsegsize */ 3174 0, /* flags */ 3175 NULL, /* lockfunc */ 3176 NULL, /* lockarg */ 3177 &adapter->rxtag); 3178 if (error) { 3179 device_printf(dev, "%s: bus_dma_tag_create failed %d\n", 3180 __func__, error); 3181 goto fail; 3182 } 3183 3184 /* Create the spare map (used by getbuf) */ 3185 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, 3186 &adapter->rx_sparemap); 3187 if (error) { 3188 device_printf(dev, "%s: bus_dmamap_create failed: %d\n", 3189 __func__, error); 3190 goto fail; 3191 } 3192 3193 rx_buffer = adapter->rx_buffer_area; 3194 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 3195 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, 3196 &rx_buffer->map); 3197 if (error) { 3198 device_printf(dev, "%s: bus_dmamap_create failed: %d\n", 3199 __func__, error); 3200 goto fail; 3201 } 3202 } 3203 3204 return (0); 3205 3206fail: 3207 lem_free_receive_structures(adapter); 3208 return (error); 3209} 3210 3211/********************************************************************* 3212 * 3213 * (Re)initialize receive structures. 
3214 * 3215 **********************************************************************/ 3216static int 3217lem_setup_receive_structures(struct adapter *adapter) 3218{ 3219 struct em_buffer *rx_buffer; 3220 int i, error; 3221#ifdef DEV_NETMAP 3222 /* we are already under lock */ 3223 struct netmap_adapter *na = NA(adapter->ifp); 3224 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); 3225#endif 3226 3227 /* Reset descriptor ring */ 3228 bzero(adapter->rx_desc_base, 3229 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc); 3230 3231 /* Free current RX buffers. */ 3232 rx_buffer = adapter->rx_buffer_area; 3233 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 3234 if (rx_buffer->m_head != NULL) { 3235 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, 3236 BUS_DMASYNC_POSTREAD); 3237 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 3238 m_freem(rx_buffer->m_head); 3239 rx_buffer->m_head = NULL; 3240 } 3241 } 3242 3243 /* Allocate new ones. */ 3244 for (i = 0; i < adapter->num_rx_desc; i++) { 3245#ifdef DEV_NETMAP 3246 if (slot) { 3247 /* the i-th NIC entry goes to slot si */ 3248 int si = netmap_idx_n2k(&na->rx_rings[0], i); 3249 uint64_t paddr; 3250 void *addr; 3251 3252 addr = PNMB(slot + si, &paddr); 3253 netmap_load_map(adapter->rxtag, rx_buffer->map, addr); 3254 /* Update descriptor */ 3255 adapter->rx_desc_base[i].buffer_addr = htole64(paddr); 3256 continue; 3257 } 3258#endif /* DEV_NETMAP */ 3259 error = lem_get_buf(adapter, i); 3260 if (error) 3261 return (error); 3262 } 3263 3264 /* Setup our descriptor pointers */ 3265 adapter->next_rx_desc_to_check = 0; 3266 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, 3267 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3268 3269 return (0); 3270} 3271 3272/********************************************************************* 3273 * 3274 * Enable receive unit. 
 *
 **********************************************************************/

static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* RADV/ITR registers exist on 82540 and later only */
	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* 82543 TBI workaround may require accepting bad packets */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long-packet enable follows the configured MTU */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	/* NB: rctl is reused here as a scratch variable for the RDT value */
	rctl = adapter->num_rx_desc - 1; /* default RDT value */
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP)
		rctl -= NA(adapter->ifp)->rx_rings[0].nr_hwavail;
#endif /* DEV_NETMAP */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);

	return;
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
static void
lem_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_sparemap) {
		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
		adapter->rx_sparemap = NULL;
	}

	/* Cleanup any existing buffers */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}

	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}

	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  For polling we also now return the number of cleaned packets
 *********************************************************************/
static bool
lem_rxeof(struct adapter *adapter, int count, int *done)
{
	struct ifnet	*ifp = adapter->ifp;
	struct mbuf	*mp;
	u8		status = 0, accept_frame = 0, eop = 0;
	u16 		len, desc_len, prev_len_adj;
	int		i, rx_sent = 0;
	struct e1000_rx_desc   *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

#ifdef DEV_NETMAP
	/* Netmap owns the ring; its irq handler also drops the RX lock */
	if (netmap_rx_irq(ifp, 0 | NETMAP_LOCKED_ENTER, &rx_sent))
		return (FALSE);
#endif /* DEV_NETMAP */

	/* Nothing ready: report zero cleaned and bail */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		if (done != NULL)
			*done = rx_sent;
		EM_RX_UNLOCK(adapter);
		return (FALSE);
	}

	while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		struct mbuf *m = NULL;

		status = current_desc->status;
		if ((status & E1000_RXD_STAT_DD) == 0)
			break;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (status & E1000_RXD_STAT_EOP) {
			/* count budgets whole packets, not descriptors */
			count--;
			eop = 1;
			/* Strip the CRC; if it straddled descriptors,
			 * remember how much to trim from the previous one */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u8	last_byte;
			u32	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			/* 82543 TBI workaround: some "bad" frames are OK */
			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Refill the slot first; if that fails, recycle
			 * the current mbuf rather than lose the slot */
			if (lem_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				lem_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    lem_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    le16toh(current_desc->special);
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			if (adapter->fmp != NULL) {
				/* Drop the partially assembled packet too */
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Call into the stack */
		if (m != NULL) {
			/*
			 * Drop the RX lock around if_input(); publish our
			 * position first and re-read it afterwards in case
			 * the ring was reinitialized while unlocked.
			 */
			adapter->next_rx_desc_to_check = i;
			EM_RX_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_RX_LOCK(adapter);
			rx_sent++;
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	if (done != NULL)
		*done = rx_sent;
	EM_RX_UNLOCK(adapter);
	return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign entire payload on
 * architectures with strict alignment. This is serious design mistake of 8254x
 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
 * payload. On architectures without strict alignment restrictions 8254x still
 * performs unaligned memory access which would reduce the performance too.
 * To avoid copying over an entire frame to align, we allocate a new mbuf and
 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
 * existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
 * not used at all on architectures with strict alignment.
3638 */ 3639static int 3640lem_fixup_rx(struct adapter *adapter) 3641{ 3642 struct mbuf *m, *n; 3643 int error; 3644 3645 error = 0; 3646 m = adapter->fmp; 3647 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { 3648 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); 3649 m->m_data += ETHER_HDR_LEN; 3650 } else { 3651 MGETHDR(n, M_NOWAIT, MT_DATA); 3652 if (n != NULL) { 3653 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); 3654 m->m_data += ETHER_HDR_LEN; 3655 m->m_len -= ETHER_HDR_LEN; 3656 n->m_len = ETHER_HDR_LEN; 3657 M_MOVE_PKTHDR(n, m); 3658 n->m_next = m; 3659 adapter->fmp = n; 3660 } else { 3661 adapter->dropped_pkts++; 3662 m_freem(adapter->fmp); 3663 adapter->fmp = NULL; 3664 error = ENOMEM; 3665 } 3666 } 3667 3668 return (error); 3669} 3670#endif 3671 3672/********************************************************************* 3673 * 3674 * Verify that the hardware indicated that the checksum is valid. 3675 * Inform the stack about the status of checksum so that stack 3676 * doesn't spend time verifying the checksum. 3677 * 3678 *********************************************************************/ 3679static void 3680lem_receive_checksum(struct adapter *adapter, 3681 struct e1000_rx_desc *rx_desc, struct mbuf *mp) 3682{ 3683 /* 82543 or newer only */ 3684 if ((adapter->hw.mac.type < e1000_82543) || 3685 /* Ignore Checksum bit is set */ 3686 (rx_desc->status & E1000_RXD_STAT_IXSM)) { 3687 mp->m_pkthdr.csum_flags = 0; 3688 return; 3689 } 3690 3691 if (rx_desc->status & E1000_RXD_STAT_IPCS) { 3692 /* Did it pass? */ 3693 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) { 3694 /* IP Checksum Good */ 3695 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 3696 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3697 3698 } else { 3699 mp->m_pkthdr.csum_flags = 0; 3700 } 3701 } 3702 3703 if (rx_desc->status & E1000_RXD_STAT_TCPCS) { 3704 /* Did it pass? 
*/ 3705 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) { 3706 mp->m_pkthdr.csum_flags |= 3707 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 3708 mp->m_pkthdr.csum_data = htons(0xffff); 3709 } 3710 } 3711} 3712 3713/* 3714 * This routine is run via an vlan 3715 * config EVENT 3716 */ 3717static void 3718lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3719{ 3720 struct adapter *adapter = ifp->if_softc; 3721 u32 index, bit; 3722 3723 if (ifp->if_softc != arg) /* Not our event */ 3724 return; 3725 3726 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */ 3727 return; 3728 3729 EM_CORE_LOCK(adapter); 3730 index = (vtag >> 5) & 0x7F; 3731 bit = vtag & 0x1F; 3732 adapter->shadow_vfta[index] |= (1 << bit); 3733 ++adapter->num_vlans; 3734 /* Re-init to load the changes */ 3735 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 3736 lem_init_locked(adapter); 3737 EM_CORE_UNLOCK(adapter); 3738} 3739 3740/* 3741 * This routine is run via an vlan 3742 * unconfig EVENT 3743 */ 3744static void 3745lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 3746{ 3747 struct adapter *adapter = ifp->if_softc; 3748 u32 index, bit; 3749 3750 if (ifp->if_softc != arg) 3751 return; 3752 3753 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 3754 return; 3755 3756 EM_CORE_LOCK(adapter); 3757 index = (vtag >> 5) & 0x7F; 3758 bit = vtag & 0x1F; 3759 adapter->shadow_vfta[index] &= ~(1 << bit); 3760 --adapter->num_vlans; 3761 /* Re-init to load the changes */ 3762 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 3763 lem_init_locked(adapter); 3764 EM_CORE_UNLOCK(adapter); 3765} 3766 3767static void 3768lem_setup_vlan_hw_support(struct adapter *adapter) 3769{ 3770 struct e1000_hw *hw = &adapter->hw; 3771 u32 reg; 3772 3773 /* 3774 ** We get here thru init_locked, meaning 3775 ** a soft reset, this has already cleared 3776 ** the VFTA and other state, so if there 3777 ** have been no vlan's registered do nothing. 
3778 */ 3779 if (adapter->num_vlans == 0) 3780 return; 3781 3782 /* 3783 ** A soft reset zero's out the VFTA, so 3784 ** we need to repopulate it now. 3785 */ 3786 for (int i = 0; i < EM_VFTA_SIZE; i++) 3787 if (adapter->shadow_vfta[i] != 0) 3788 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 3789 i, adapter->shadow_vfta[i]); 3790 3791 reg = E1000_READ_REG(hw, E1000_CTRL); 3792 reg |= E1000_CTRL_VME; 3793 E1000_WRITE_REG(hw, E1000_CTRL, reg); 3794 3795 /* Enable the Filter Table */ 3796 reg = E1000_READ_REG(hw, E1000_RCTL); 3797 reg &= ~E1000_RCTL_CFIEN; 3798 reg |= E1000_RCTL_VFE; 3799 E1000_WRITE_REG(hw, E1000_RCTL, reg); 3800} 3801 3802static void 3803lem_enable_intr(struct adapter *adapter) 3804{ 3805 struct e1000_hw *hw = &adapter->hw; 3806 u32 ims_mask = IMS_ENABLE_MASK; 3807 3808 E1000_WRITE_REG(hw, E1000_IMS, ims_mask); 3809} 3810 3811static void 3812lem_disable_intr(struct adapter *adapter) 3813{ 3814 struct e1000_hw *hw = &adapter->hw; 3815 3816 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 3817} 3818 3819/* 3820 * Bit of a misnomer, what this really means is 3821 * to enable OS management of the system... aka 3822 * to disable special hardware management features 3823 */ 3824static void 3825lem_init_manageability(struct adapter *adapter) 3826{ 3827 /* A shared code workaround */ 3828 if (adapter->has_manage) { 3829 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3830 /* disable hardware interception of ARP */ 3831 manc &= ~(E1000_MANC_ARP_EN); 3832 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3833 } 3834} 3835 3836/* 3837 * Give control back to hardware management 3838 * controller if there is one. 
3839 */ 3840static void 3841lem_release_manageability(struct adapter *adapter) 3842{ 3843 if (adapter->has_manage) { 3844 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3845 3846 /* re-enable hardware interception of ARP */ 3847 manc |= E1000_MANC_ARP_EN; 3848 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3849 } 3850} 3851 3852/* 3853 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. 3854 * For ASF and Pass Through versions of f/w this means 3855 * that the driver is loaded. For AMT version type f/w 3856 * this means that the network i/f is open. 3857 */ 3858static void 3859lem_get_hw_control(struct adapter *adapter) 3860{ 3861 u32 ctrl_ext; 3862 3863 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3864 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, 3865 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3866 return; 3867} 3868 3869/* 3870 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3871 * For ASF and Pass Through versions of f/w this means that 3872 * the driver is no longer loaded. For AMT versions of the 3873 * f/w this means that the network i/f is closed. 3874 */ 3875static void 3876lem_release_hw_control(struct adapter *adapter) 3877{ 3878 u32 ctrl_ext; 3879 3880 if (!adapter->has_manage) 3881 return; 3882 3883 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3884 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, 3885 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3886 return; 3887} 3888 3889static int 3890lem_is_valid_ether_addr(u8 *addr) 3891{ 3892 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 3893 3894 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 3895 return (FALSE); 3896 } 3897 3898 return (TRUE); 3899} 3900 3901/* 3902** Parse the interface capabilities with regard 3903** to both system management and wake-on-lan for 3904** later use. 
3905*/ 3906static void 3907lem_get_wakeup(device_t dev) 3908{ 3909 struct adapter *adapter = device_get_softc(dev); 3910 u16 eeprom_data = 0, device_id, apme_mask; 3911 3912 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); 3913 apme_mask = EM_EEPROM_APME; 3914 3915 switch (adapter->hw.mac.type) { 3916 case e1000_82542: 3917 case e1000_82543: 3918 break; 3919 case e1000_82544: 3920 e1000_read_nvm(&adapter->hw, 3921 NVM_INIT_CONTROL2_REG, 1, &eeprom_data); 3922 apme_mask = EM_82544_APME; 3923 break; 3924 case e1000_82546: 3925 case e1000_82546_rev_3: 3926 if (adapter->hw.bus.func == 1) { 3927 e1000_read_nvm(&adapter->hw, 3928 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 3929 break; 3930 } else 3931 e1000_read_nvm(&adapter->hw, 3932 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 3933 break; 3934 default: 3935 e1000_read_nvm(&adapter->hw, 3936 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 3937 break; 3938 } 3939 if (eeprom_data & apme_mask) 3940 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC); 3941 /* 3942 * We have the eeprom settings, now apply the special cases 3943 * where the eeprom may be wrong or the board won't support 3944 * wake on lan on a particular port 3945 */ 3946 device_id = pci_get_device(dev); 3947 switch (device_id) { 3948 case E1000_DEV_ID_82546GB_PCIE: 3949 adapter->wol = 0; 3950 break; 3951 case E1000_DEV_ID_82546EB_FIBER: 3952 case E1000_DEV_ID_82546GB_FIBER: 3953 /* Wake events only supported on port A for dual fiber 3954 * regardless of eeprom setting */ 3955 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & 3956 E1000_STATUS_FUNC_1) 3957 adapter->wol = 0; 3958 break; 3959 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 3960 /* if quad port adapter, disable WoL on all but port A */ 3961 if (global_quad_port_a != 0) 3962 adapter->wol = 0; 3963 /* Reset for multiple quad port adapters */ 3964 if (++global_quad_port_a == 4) 3965 global_quad_port_a = 0; 3966 break; 3967 } 3968 return; 3969} 3970 3971 3972/* 3973 * Enable PCI Wake On Lan 
capability 3974 */ 3975static void 3976lem_enable_wakeup(device_t dev) 3977{ 3978 struct adapter *adapter = device_get_softc(dev); 3979 struct ifnet *ifp = adapter->ifp; 3980 u32 pmc, ctrl, ctrl_ext, rctl; 3981 u16 status; 3982 3983 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0)) 3984 return; 3985 3986 /* Advertise the wakeup capability */ 3987 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); 3988 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3); 3989 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); 3990 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); 3991 3992 /* Keep the laser running on Fiber adapters */ 3993 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 3994 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 3995 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3996 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; 3997 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext); 3998 } 3999 4000 /* 4001 ** Determine type of Wakeup: note that wol 4002 ** is set with all bits on by default. 
4003 */ 4004 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0) 4005 adapter->wol &= ~E1000_WUFC_MAG; 4006 4007 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0) 4008 adapter->wol &= ~E1000_WUFC_MC; 4009 else { 4010 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 4011 rctl |= E1000_RCTL_MPE; 4012 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); 4013 } 4014 4015 if (adapter->hw.mac.type == e1000_pchlan) { 4016 if (lem_enable_phy_wakeup(adapter)) 4017 return; 4018 } else { 4019 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); 4020 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); 4021 } 4022 4023 4024 /* Request PME */ 4025 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); 4026 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 4027 if (ifp->if_capenable & IFCAP_WOL) 4028 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 4029 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); 4030 4031 return; 4032} 4033 4034/* 4035** WOL in the newer chipset interfaces (pchlan) 4036** require thing to be copied into the phy 4037*/ 4038static int 4039lem_enable_phy_wakeup(struct adapter *adapter) 4040{ 4041 struct e1000_hw *hw = &adapter->hw; 4042 u32 mreg, ret = 0; 4043 u16 preg; 4044 4045 /* copy MAC RARs to PHY RARs */ 4046 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) { 4047 mreg = E1000_READ_REG(hw, E1000_RAL(i)); 4048 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF)); 4049 e1000_write_phy_reg(hw, BM_RAR_M(i), 4050 (u16)((mreg >> 16) & 0xFFFF)); 4051 mreg = E1000_READ_REG(hw, E1000_RAH(i)); 4052 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF)); 4053 e1000_write_phy_reg(hw, BM_RAR_CTRL(i), 4054 (u16)((mreg >> 16) & 0xFFFF)); 4055 } 4056 4057 /* copy MAC MTA to PHY MTA */ 4058 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) { 4059 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); 4060 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF)); 4061 e1000_write_phy_reg(hw, BM_MTA(i) + 1, 4062 (u16)((mreg >> 16) & 
0xFFFF)); 4063 } 4064 4065 /* configure PHY Rx Control register */ 4066 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg); 4067 mreg = E1000_READ_REG(hw, E1000_RCTL); 4068 if (mreg & E1000_RCTL_UPE) 4069 preg |= BM_RCTL_UPE; 4070 if (mreg & E1000_RCTL_MPE) 4071 preg |= BM_RCTL_MPE; 4072 preg &= ~(BM_RCTL_MO_MASK); 4073 if (mreg & E1000_RCTL_MO_3) 4074 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 4075 << BM_RCTL_MO_SHIFT); 4076 if (mreg & E1000_RCTL_BAM) 4077 preg |= BM_RCTL_BAM; 4078 if (mreg & E1000_RCTL_PMCF) 4079 preg |= BM_RCTL_PMCF; 4080 mreg = E1000_READ_REG(hw, E1000_CTRL); 4081 if (mreg & E1000_CTRL_RFCE) 4082 preg |= BM_RCTL_RFCE; 4083 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg); 4084 4085 /* enable PHY wakeup in MAC register */ 4086 E1000_WRITE_REG(hw, E1000_WUC, 4087 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); 4088 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol); 4089 4090 /* configure and enable PHY wakeup in PHY registers */ 4091 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol); 4092 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); 4093 4094 /* activate PHY wakeup */ 4095 ret = hw->phy.ops.acquire(hw); 4096 if (ret) { 4097 printf("Could not acquire PHY\n"); 4098 return ret; 4099 } 4100 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 4101 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); 4102 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg); 4103 if (ret) { 4104 printf("Could not read PHY page 769\n"); 4105 goto out; 4106 } 4107 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 4108 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg); 4109 if (ret) 4110 printf("Could not set PHY Host Wakeup bit\n"); 4111out: 4112 hw->phy.ops.release(hw); 4113 4114 return ret; 4115} 4116 4117static void 4118lem_led_func(void *arg, int onoff) 4119{ 4120 struct adapter *adapter = arg; 4121 4122 EM_CORE_LOCK(adapter); 4123 if (onoff) { 4124 e1000_setup_led(&adapter->hw); 4125 e1000_led_on(&adapter->hw); 4126 } else { 4127 
e1000_led_off(&adapter->hw); 4128 e1000_cleanup_led(&adapter->hw); 4129 } 4130 EM_CORE_UNLOCK(adapter); 4131} 4132 4133/********************************************************************* 4134* 82544 Coexistence issue workaround. 4135* There are 2 issues. 4136* 1. Transmit Hang issue. 4137* To detect this issue, following equation can be used... 4138* SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 4139* If SUM[3:0] is in between 1 to 4, we will have this issue. 4140* 4141* 2. DAC issue. 4142* To detect this issue, following equation can be used... 4143* SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 4144* If SUM[3:0] is in between 9 to c, we will have this issue. 4145* 4146* 4147* WORKAROUND: 4148* Make sure we do not have ending address 4149* as 1,2,3,4(Hang) or 9,a,b,c (DAC) 4150* 4151*************************************************************************/ 4152static u32 4153lem_fill_descriptors (bus_addr_t address, u32 length, 4154 PDESC_ARRAY desc_array) 4155{ 4156 u32 safe_terminator; 4157 4158 /* Since issue is sensitive to length and address.*/ 4159 /* Let us first check the address...*/ 4160 if (length <= 4) { 4161 desc_array->descriptor[0].address = address; 4162 desc_array->descriptor[0].length = length; 4163 desc_array->elements = 1; 4164 return (desc_array->elements); 4165 } 4166 safe_terminator = (u32)((((u32)address & 0x7) + 4167 (length & 0xF)) & 0xF); 4168 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */ 4169 if (safe_terminator == 0 || 4170 (safe_terminator > 4 && 4171 safe_terminator < 9) || 4172 (safe_terminator > 0xC && 4173 safe_terminator <= 0xF)) { 4174 desc_array->descriptor[0].address = address; 4175 desc_array->descriptor[0].length = length; 4176 desc_array->elements = 1; 4177 return (desc_array->elements); 4178 } 4179 4180 desc_array->descriptor[0].address = address; 4181 desc_array->descriptor[0].length = length - 4; 4182 desc_array->descriptor[1].address = address + (length - 4); 4183 desc_array->descriptor[1].length = 4; 4184 
desc_array->elements = 2; 4185 return (desc_array->elements); 4186} 4187 4188/********************************************************************** 4189 * 4190 * Update the board statistics counters. 4191 * 4192 **********************************************************************/ 4193static void 4194lem_update_stats_counters(struct adapter *adapter) 4195{ 4196 struct ifnet *ifp; 4197 4198 if(adapter->hw.phy.media_type == e1000_media_type_copper || 4199 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { 4200 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS); 4201 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); 4202 } 4203 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); 4204 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); 4205 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); 4206 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); 4207 4208 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); 4209 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); 4210 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); 4211 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); 4212 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); 4213 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); 4214 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); 4215 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); 4216 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); 4217 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); 4218 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); 4219 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); 4220 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); 4221 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); 4222 
adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); 4223 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); 4224 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); 4225 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); 4226 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); 4227 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); 4228 4229 /* For the 64-bit byte counters the low dword must be read first. */ 4230 /* Both registers clear on the read of the high dword */ 4231 4232 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) + 4233 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32); 4234 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) + 4235 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32); 4236 4237 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); 4238 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); 4239 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); 4240 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); 4241 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); 4242 4243 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); 4244 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); 4245 4246 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); 4247 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); 4248 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); 4249 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); 4250 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); 4251 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); 4252 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); 4253 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); 4254 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); 4255 
adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); 4256 4257 if (adapter->hw.mac.type >= e1000_82543) { 4258 adapter->stats.algnerrc += 4259 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); 4260 adapter->stats.rxerrc += 4261 E1000_READ_REG(&adapter->hw, E1000_RXERRC); 4262 adapter->stats.tncrs += 4263 E1000_READ_REG(&adapter->hw, E1000_TNCRS); 4264 adapter->stats.cexterr += 4265 E1000_READ_REG(&adapter->hw, E1000_CEXTERR); 4266 adapter->stats.tsctc += 4267 E1000_READ_REG(&adapter->hw, E1000_TSCTC); 4268 adapter->stats.tsctfc += 4269 E1000_READ_REG(&adapter->hw, E1000_TSCTFC); 4270 } 4271 ifp = adapter->ifp; 4272 4273 ifp->if_collisions = adapter->stats.colc; 4274 4275 /* Rx Errors */ 4276 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc + 4277 adapter->stats.crcerrs + adapter->stats.algnerrc + 4278 adapter->stats.ruc + adapter->stats.roc + 4279 adapter->stats.mpc + adapter->stats.cexterr; 4280 4281 /* Tx Errors */ 4282 ifp->if_oerrors = adapter->stats.ecol + 4283 adapter->stats.latecol + adapter->watchdog_events; 4284} 4285 4286/* Export a single 32-bit register via a read-only sysctl. */ 4287static int 4288lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) 4289{ 4290 struct adapter *adapter; 4291 u_int val; 4292 4293 adapter = oidp->oid_arg1; 4294 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); 4295 return (sysctl_handle_int(oidp, &val, 0, req)); 4296} 4297 4298/* 4299 * Add sysctl variables, one per statistic, to the system. 
4300 */ 4301static void 4302lem_add_hw_stats(struct adapter *adapter) 4303{ 4304 device_t dev = adapter->dev; 4305 4306 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4307 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 4308 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 4309 struct e1000_hw_stats *stats = &adapter->stats; 4310 4311 struct sysctl_oid *stat_node; 4312 struct sysctl_oid_list *stat_list; 4313 4314 /* Driver Statistics */ 4315 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", 4316 CTLFLAG_RD, &adapter->mbuf_alloc_failed, 4317 "Std mbuf failed"); 4318 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", 4319 CTLFLAG_RD, &adapter->mbuf_cluster_failed, 4320 "Std mbuf cluster failed"); 4321 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 4322 CTLFLAG_RD, &adapter->dropped_pkts, 4323 "Driver dropped packets"); 4324 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 4325 CTLFLAG_RD, &adapter->no_tx_dma_setup, 4326 "Driver tx dma failure in xmit"); 4327 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1", 4328 CTLFLAG_RD, &adapter->no_tx_desc_avail1, 4329 "Not enough tx descriptors failure in xmit"); 4330 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2", 4331 CTLFLAG_RD, &adapter->no_tx_desc_avail2, 4332 "Not enough tx descriptors failure in xmit"); 4333 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", 4334 CTLFLAG_RD, &adapter->rx_overruns, 4335 "RX overruns"); 4336 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", 4337 CTLFLAG_RD, &adapter->watchdog_events, 4338 "Watchdog timeouts"); 4339 4340 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", 4341 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL, 4342 lem_sysctl_reg_handler, "IU", 4343 "Device Control Register"); 4344 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", 4345 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL, 4346 lem_sysctl_reg_handler, "IU", 4347 "Receiver Control Register"); 4348 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, 
"fc_high_water", 4349 CTLFLAG_RD, &adapter->hw.fc.high_water, 0, 4350 "Flow Control High Watermark"); 4351 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 4352 CTLFLAG_RD, &adapter->hw.fc.low_water, 0, 4353 "Flow Control Low Watermark"); 4354 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround", 4355 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt, 4356 "TX FIFO workaround events"); 4357 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset", 4358 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt, 4359 "TX FIFO resets"); 4360 4361 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head", 4362 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0), 4363 lem_sysctl_reg_handler, "IU", 4364 "Transmit Descriptor Head"); 4365 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail", 4366 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0), 4367 lem_sysctl_reg_handler, "IU", 4368 "Transmit Descriptor Tail"); 4369 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head", 4370 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0), 4371 lem_sysctl_reg_handler, "IU", 4372 "Receive Descriptor Head"); 4373 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail", 4374 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0), 4375 lem_sysctl_reg_handler, "IU", 4376 "Receive Descriptor Tail"); 4377 4378 4379 /* MAC stats get their own sub node */ 4380 4381 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 4382 CTLFLAG_RD, NULL, "Statistics"); 4383 stat_list = SYSCTL_CHILDREN(stat_node); 4384 4385 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", 4386 CTLFLAG_RD, &stats->ecol, 4387 "Excessive collisions"); 4388 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", 4389 CTLFLAG_RD, &stats->scc, 4390 "Single collisions"); 4391 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 4392 CTLFLAG_RD, &stats->mcc, 4393 "Multiple collisions"); 4394 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", 4395 CTLFLAG_RD, &stats->latecol, 4396 "Late collisions"); 4397 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, 
"collision_count", 4398 CTLFLAG_RD, &stats->colc, 4399 "Collision Count"); 4400 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", 4401 CTLFLAG_RD, &adapter->stats.symerrs, 4402 "Symbol Errors"); 4403 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", 4404 CTLFLAG_RD, &adapter->stats.sec, 4405 "Sequence Errors"); 4406 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", 4407 CTLFLAG_RD, &adapter->stats.dc, 4408 "Defer Count"); 4409 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", 4410 CTLFLAG_RD, &adapter->stats.mpc, 4411 "Missed Packets"); 4412 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", 4413 CTLFLAG_RD, &adapter->stats.rnbc, 4414 "Receive No Buffers"); 4415 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", 4416 CTLFLAG_RD, &adapter->stats.ruc, 4417 "Receive Undersize"); 4418 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", 4419 CTLFLAG_RD, &adapter->stats.rfc, 4420 "Fragmented Packets Received "); 4421 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", 4422 CTLFLAG_RD, &adapter->stats.roc, 4423 "Oversized Packets Received"); 4424 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", 4425 CTLFLAG_RD, &adapter->stats.rjc, 4426 "Recevied Jabber"); 4427 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", 4428 CTLFLAG_RD, &adapter->stats.rxerrc, 4429 "Receive Errors"); 4430 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", 4431 CTLFLAG_RD, &adapter->stats.crcerrs, 4432 "CRC errors"); 4433 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", 4434 CTLFLAG_RD, &adapter->stats.algnerrc, 4435 "Alignment Errors"); 4436 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", 4437 CTLFLAG_RD, &adapter->stats.cexterr, 4438 "Collision/Carrier extension errors"); 4439 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", 4440 CTLFLAG_RD, &adapter->stats.xonrxc, 4441 "XON Received"); 4442 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", 4443 CTLFLAG_RD, 
&adapter->stats.xontxc, 4444 "XON Transmitted"); 4445 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", 4446 CTLFLAG_RD, &adapter->stats.xoffrxc, 4447 "XOFF Received"); 4448 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", 4449 CTLFLAG_RD, &adapter->stats.xofftxc, 4450 "XOFF Transmitted"); 4451 4452 /* Packet Reception Stats */ 4453 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", 4454 CTLFLAG_RD, &adapter->stats.tpr, 4455 "Total Packets Received "); 4456 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", 4457 CTLFLAG_RD, &adapter->stats.gprc, 4458 "Good Packets Received"); 4459 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", 4460 CTLFLAG_RD, &adapter->stats.bprc, 4461 "Broadcast Packets Received"); 4462 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", 4463 CTLFLAG_RD, &adapter->stats.mprc, 4464 "Multicast Packets Received"); 4465 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", 4466 CTLFLAG_RD, &adapter->stats.prc64, 4467 "64 byte frames received "); 4468 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", 4469 CTLFLAG_RD, &adapter->stats.prc127, 4470 "65-127 byte frames received"); 4471 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", 4472 CTLFLAG_RD, &adapter->stats.prc255, 4473 "128-255 byte frames received"); 4474 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", 4475 CTLFLAG_RD, &adapter->stats.prc511, 4476 "256-511 byte frames received"); 4477 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", 4478 CTLFLAG_RD, &adapter->stats.prc1023, 4479 "512-1023 byte frames received"); 4480 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", 4481 CTLFLAG_RD, &adapter->stats.prc1522, 4482 "1023-1522 byte frames received"); 4483 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 4484 CTLFLAG_RD, &adapter->stats.gorc, 4485 "Good Octets Received"); 4486 4487 /* Packet Transmission Stats */ 4488 SYSCTL_ADD_UQUAD(ctx, 
stat_list, OID_AUTO, "good_octets_txd", 4489 CTLFLAG_RD, &adapter->stats.gotc, 4490 "Good Octets Transmitted"); 4491 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", 4492 CTLFLAG_RD, &adapter->stats.tpt, 4493 "Total Packets Transmitted"); 4494 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 4495 CTLFLAG_RD, &adapter->stats.gptc, 4496 "Good Packets Transmitted"); 4497 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", 4498 CTLFLAG_RD, &adapter->stats.bptc, 4499 "Broadcast Packets Transmitted"); 4500 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", 4501 CTLFLAG_RD, &adapter->stats.mptc, 4502 "Multicast Packets Transmitted"); 4503 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", 4504 CTLFLAG_RD, &adapter->stats.ptc64, 4505 "64 byte frames transmitted "); 4506 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", 4507 CTLFLAG_RD, &adapter->stats.ptc127, 4508 "65-127 byte frames transmitted"); 4509 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", 4510 CTLFLAG_RD, &adapter->stats.ptc255, 4511 "128-255 byte frames transmitted"); 4512 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", 4513 CTLFLAG_RD, &adapter->stats.ptc511, 4514 "256-511 byte frames transmitted"); 4515 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", 4516 CTLFLAG_RD, &adapter->stats.ptc1023, 4517 "512-1023 byte frames transmitted"); 4518 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", 4519 CTLFLAG_RD, &adapter->stats.ptc1522, 4520 "1024-1522 byte frames transmitted"); 4521 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", 4522 CTLFLAG_RD, &adapter->stats.tsctc, 4523 "TSO Contexts Transmitted"); 4524 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", 4525 CTLFLAG_RD, &adapter->stats.tsctfc, 4526 "TSO Contexts Failed"); 4527} 4528 4529/********************************************************************** 4530 * 4531 * This routine provides a way to dump out the adapter 
eeprom, 4532 * often a useful debug/service tool. This only dumps the first 4533 * 32 words, stuff that matters is in that extent. 4534 * 4535 **********************************************************************/ 4536 4537static int 4538lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) 4539{ 4540 struct adapter *adapter; 4541 int error; 4542 int result; 4543 4544 result = -1; 4545 error = sysctl_handle_int(oidp, &result, 0, req); 4546 4547 if (error || !req->newptr) 4548 return (error); 4549 4550 /* 4551 * This value will cause a hex dump of the 4552 * first 32 16-bit words of the EEPROM to 4553 * the screen. 4554 */ 4555 if (result == 1) { 4556 adapter = (struct adapter *)arg1; 4557 lem_print_nvm_info(adapter); 4558 } 4559 4560 return (error); 4561} 4562 4563static void 4564lem_print_nvm_info(struct adapter *adapter) 4565{ 4566 u16 eeprom_data; 4567 int i, j, row = 0; 4568 4569 /* Its a bit crude, but it gets the job done */ 4570 printf("\nInterface EEPROM Dump:\n"); 4571 printf("Offset\n0x0000 "); 4572 for (i = 0, j = 0; i < 32; i++, j++) { 4573 if (j == 8) { /* Make the offset block */ 4574 j = 0; ++row; 4575 printf("\n0x00%x0 ",row); 4576 } 4577 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); 4578 printf("%04x ", eeprom_data); 4579 } 4580 printf("\n"); 4581} 4582 4583static int 4584lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 4585{ 4586 struct em_int_delay_info *info; 4587 struct adapter *adapter; 4588 u32 regval; 4589 int error; 4590 int usecs; 4591 int ticks; 4592 4593 info = (struct em_int_delay_info *)arg1; 4594 usecs = info->value; 4595 error = sysctl_handle_int(oidp, &usecs, 0, req); 4596 if (error != 0 || req->newptr == NULL) 4597 return (error); 4598 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535)) 4599 return (EINVAL); 4600 info->value = usecs; 4601 ticks = EM_USECS_TO_TICKS(usecs); 4602 if (info->offset == E1000_ITR) /* units are 256ns here */ 4603 ticks *= 4; 4604 4605 adapter = info->adapter; 4606 4607 EM_CORE_LOCK(adapter); 4608 regval = 
E1000_READ_OFFSET(&adapter->hw, info->offset); 4609 regval = (regval & ~0xffff) | (ticks & 0xffff); 4610 /* Handle a few special cases. */ 4611 switch (info->offset) { 4612 case E1000_RDTR: 4613 break; 4614 case E1000_TIDV: 4615 if (ticks == 0) { 4616 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; 4617 /* Don't write 0 into the TIDV register. */ 4618 regval++; 4619 } else 4620 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 4621 break; 4622 } 4623 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); 4624 EM_CORE_UNLOCK(adapter); 4625 return (0); 4626} 4627 4628static void 4629lem_add_int_delay_sysctl(struct adapter *adapter, const char *name, 4630 const char *description, struct em_int_delay_info *info, 4631 int offset, int value) 4632{ 4633 info->adapter = adapter; 4634 info->offset = offset; 4635 info->value = value; 4636 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), 4637 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4638 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, 4639 info, 0, lem_sysctl_int_delay, "I", description); 4640} 4641 4642static void 4643lem_set_flow_cntrl(struct adapter *adapter, const char *name, 4644 const char *description, int *limit, int value) 4645{ 4646 *limit = value; 4647 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 4648 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4649 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); 4650} 4651 4652static void 4653lem_add_rx_process_limit(struct adapter *adapter, const char *name, 4654 const char *description, int *limit, int value) 4655{ 4656 *limit = value; 4657 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), 4658 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 4659 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); 4660}
|