if_em.c revision 152545
1125208Sache/************************************************************************** 2125208Sache 3125208SacheCopyright (c) 2001-2005, Intel Corporation 4125208SacheAll rights reserved. 5125208Sache 6125208SacheRedistribution and use in source and binary forms, with or without 7125208Sachemodification, are permitted provided that the following conditions are met: 8125208Sache 9125208Sache 1. Redistributions of source code must retain the above copyright notice, 10125208Sache this list of conditions and the following disclaimer. 11125208Sache 12125208Sache 2. Redistributions in binary form must reproduce the above copyright 13125208Sache notice, this list of conditions and the following disclaimer in the 14125208Sache documentation and/or other materials provided with the distribution. 15125208Sache 16125208Sache 3. Neither the name of the Intel Corporation nor the names of its 17125208Sache contributors may be used to endorse or promote products derived from 18125208Sache this software without specific prior written permission. 19125208Sache 20125208SacheTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21125208SacheAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22125208SacheIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23125208SacheARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24125208SacheLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25125208SacheCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26125208SacheSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27125208SacheINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28125208SacheCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29125208SacheARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30125208SachePOSSIBILITY OF SUCH DAMAGE. 
31125208Sache 32125208Sache***************************************************************************/ 33125208Sache 34125208Sache/*$FreeBSD: head/sys/dev/em/if_em.c 152545 2005-11-17 10:13:18Z glebius $*/ 35125208Sache 36125208Sache#ifdef HAVE_KERNEL_OPTION_HEADERS 37#include "opt_device_polling.h" 38#endif 39 40#include <dev/em/if_em.h> 41 42/********************************************************************* 43 * Set this to one to display debug statistics 44 *********************************************************************/ 45int em_display_debug_stats = 0; 46 47/********************************************************************* 48 * Driver version 49 *********************************************************************/ 50 51char em_driver_version[] = "2.1.7"; 52 53 54/********************************************************************* 55 * PCI Device ID Table 56 * 57 * Used by probe to select devices to load on 58 * Last field stores an index into em_strings 59 * Last entry must be all 0s 60 * 61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 62 *********************************************************************/ 63 64static em_vendor_info_t em_vendor_info_array[] = 65{ 66 /* Intel(R) PRO/1000 Network Connection */ 67 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0}, 68 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, 69 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0}, 70 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, 71 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, 72 73 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0}, 74 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0}, 75 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, 76 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, 77 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0}, 78 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, 
79 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, 80 81 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0}, 82 83 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 84 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 85 86 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 87 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 88 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 89 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, 90 91 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 92 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 93 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 94 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 95 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 96 97 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 98 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 99 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 100 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 101 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, 102 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, 103 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0}, 104 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, 105 106 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0}, 107 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, 108 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0}, 109 110 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0}, 111 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0}, 112 113 /* required last entry */ 114 { 0, 0, 0, 0, 0} 115}; 116 117/********************************************************************* 118 * Table of branding strings for all 
supported NICs. 119 *********************************************************************/ 120 121static char *em_strings[] = { 122 "Intel(R) PRO/1000 Network Connection" 123}; 124 125/********************************************************************* 126 * Function prototypes 127 *********************************************************************/ 128static int em_probe(device_t); 129static int em_attach(device_t); 130static int em_detach(device_t); 131static int em_shutdown(device_t); 132static void em_intr(void *); 133static void em_start(struct ifnet *); 134static int em_ioctl(struct ifnet *, u_long, caddr_t); 135static void em_watchdog(struct ifnet *); 136static void em_init(void *); 137static void em_init_locked(struct adapter *); 138static void em_stop(void *); 139static void em_media_status(struct ifnet *, struct ifmediareq *); 140static int em_media_change(struct ifnet *); 141static void em_identify_hardware(struct adapter *); 142static int em_allocate_pci_resources(struct adapter *); 143static void em_free_pci_resources(struct adapter *); 144static void em_local_timer(void *); 145static int em_hardware_init(struct adapter *); 146static void em_setup_interface(device_t, struct adapter *); 147static int em_setup_transmit_structures(struct adapter *); 148static void em_initialize_transmit_unit(struct adapter *); 149static int em_setup_receive_structures(struct adapter *); 150static void em_initialize_receive_unit(struct adapter *); 151static void em_enable_intr(struct adapter *); 152static void em_disable_intr(struct adapter *); 153static void em_free_transmit_structures(struct adapter *); 154static void em_free_receive_structures(struct adapter *); 155static void em_update_stats_counters(struct adapter *); 156static void em_clean_transmit_interrupts(struct adapter *); 157static int em_allocate_receive_structures(struct adapter *); 158static int em_allocate_transmit_structures(struct adapter *); 159static void em_process_receive_interrupts(struct 
adapter *, int); 160static void em_receive_checksum(struct adapter *, 161 struct em_rx_desc *, 162 struct mbuf *); 163static void em_transmit_checksum_setup(struct adapter *, 164 struct mbuf *, 165 u_int32_t *, 166 u_int32_t *); 167static void em_set_promisc(struct adapter *); 168static void em_disable_promisc(struct adapter *); 169static void em_set_multi(struct adapter *); 170static void em_print_hw_stats(struct adapter *); 171static void em_print_link_status(struct adapter *); 172static int em_get_buf(int i, struct adapter *, 173 struct mbuf *); 174static void em_enable_vlans(struct adapter *); 175static void em_disable_vlans(struct adapter *); 176static int em_encap(struct adapter *, struct mbuf **); 177static void em_smartspeed(struct adapter *); 178static int em_82547_fifo_workaround(struct adapter *, int); 179static void em_82547_update_fifo_head(struct adapter *, int); 180static int em_82547_tx_fifo_reset(struct adapter *); 181static void em_82547_move_tail(void *arg); 182static void em_82547_move_tail_locked(struct adapter *); 183static int em_dma_malloc(struct adapter *, bus_size_t, 184 struct em_dma_alloc *, int); 185static void em_dma_free(struct adapter *, struct em_dma_alloc *); 186static void em_print_debug_info(struct adapter *); 187static int em_is_valid_ether_addr(u_int8_t *); 188static int em_sysctl_stats(SYSCTL_HANDLER_ARGS); 189static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 190static u_int32_t em_fill_descriptors (bus_addr_t address, 191 u_int32_t length, 192 PDESC_ARRAY desc_array); 193static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS); 194static void em_add_int_delay_sysctl(struct adapter *, const char *, 195 const char *, struct em_int_delay_info *, 196 int, int); 197#ifdef DEVICE_POLLING 198static poll_handler_t em_poll; 199#endif 200 201/********************************************************************* 202 * FreeBSD Device Interface Entry Points 203 *********************************************************************/ 204 
205static device_method_t em_methods[] = { 206 /* Device interface */ 207 DEVMETHOD(device_probe, em_probe), 208 DEVMETHOD(device_attach, em_attach), 209 DEVMETHOD(device_detach, em_detach), 210 DEVMETHOD(device_shutdown, em_shutdown), 211 {0, 0} 212}; 213 214static driver_t em_driver = { 215 "em", em_methods, sizeof(struct adapter ), 216}; 217 218static devclass_t em_devclass; 219DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0); 220MODULE_DEPEND(em, pci, 1, 1, 1); 221MODULE_DEPEND(em, ether, 1, 1, 1); 222 223/********************************************************************* 224 * Tunable default values. 225 *********************************************************************/ 226 227#define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) 228#define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) 229 230static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV); 231static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR); 232static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV); 233static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV); 234static int em_rxd = EM_DEFAULT_RXD; 235static int em_txd = EM_DEFAULT_TXD; 236 237TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt); 238TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt); 239TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt); 240TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt); 241TUNABLE_INT("hw.em.rxd", &em_rxd); 242TUNABLE_INT("hw.em.txd", &em_txd); 243 244/********************************************************************* 245 * Device identification routine 246 * 247 * em_probe determines if the driver should be loaded on 248 * adapter based on PCI vendor/device id of the adapter. 
249 * 250 * return BUS_PROBE_DEFAULT on success, positive on failure 251 *********************************************************************/ 252 253static int 254em_probe(device_t dev) 255{ 256 em_vendor_info_t *ent; 257 258 u_int16_t pci_vendor_id = 0; 259 u_int16_t pci_device_id = 0; 260 u_int16_t pci_subvendor_id = 0; 261 u_int16_t pci_subdevice_id = 0; 262 char adapter_name[60]; 263 264 INIT_DEBUGOUT("em_probe: begin"); 265 266 pci_vendor_id = pci_get_vendor(dev); 267 if (pci_vendor_id != EM_VENDOR_ID) 268 return(ENXIO); 269 270 pci_device_id = pci_get_device(dev); 271 pci_subvendor_id = pci_get_subvendor(dev); 272 pci_subdevice_id = pci_get_subdevice(dev); 273 274 ent = em_vendor_info_array; 275 while (ent->vendor_id != 0) { 276 if ((pci_vendor_id == ent->vendor_id) && 277 (pci_device_id == ent->device_id) && 278 279 ((pci_subvendor_id == ent->subvendor_id) || 280 (ent->subvendor_id == PCI_ANY_ID)) && 281 282 ((pci_subdevice_id == ent->subdevice_id) || 283 (ent->subdevice_id == PCI_ANY_ID))) { 284 sprintf(adapter_name, "%s, Version - %s", 285 em_strings[ent->index], 286 em_driver_version); 287 device_set_desc_copy(dev, adapter_name); 288 return(BUS_PROBE_DEFAULT); 289 } 290 ent++; 291 } 292 293 return(ENXIO); 294} 295 296/********************************************************************* 297 * Device initialization routine 298 * 299 * The attach entry point is called when the driver is being loaded. 300 * This routine identifies the type of hardware, allocates all resources 301 * and initializes the hardware. 
302 * 303 * return 0 on success, positive on failure 304 *********************************************************************/ 305 306static int 307em_attach(device_t dev) 308{ 309 struct adapter * adapter; 310 int tsize, rsize; 311 int error = 0; 312 313 INIT_DEBUGOUT("em_attach: begin"); 314 315 /* Allocate, clear, and link in our adapter structure */ 316 if (!(adapter = device_get_softc(dev))) { 317 printf("em: adapter structure allocation failed\n"); 318 return(ENOMEM); 319 } 320 bzero(adapter, sizeof(struct adapter )); 321 adapter->dev = dev; 322 adapter->osdep.dev = dev; 323 adapter->unit = device_get_unit(dev); 324 EM_LOCK_INIT(adapter, device_get_nameunit(dev)); 325 326 /* SYSCTL stuff */ 327 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 328 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 329 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 330 (void *)adapter, 0, 331 em_sysctl_debug_info, "I", "Debug Information"); 332 333 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 334 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 335 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 336 (void *)adapter, 0, 337 em_sysctl_stats, "I", "Statistics"); 338 339 callout_init(&adapter->timer, CALLOUT_MPSAFE); 340 callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE); 341 342 /* Determine hardware revision */ 343 em_identify_hardware(adapter); 344 345 /* Set up some sysctls for the tunable interrupt delays */ 346 em_add_int_delay_sysctl(adapter, "rx_int_delay", 347 "receive interrupt delay in usecs", &adapter->rx_int_delay, 348 E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt); 349 em_add_int_delay_sysctl(adapter, "tx_int_delay", 350 "transmit interrupt delay in usecs", &adapter->tx_int_delay, 351 E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt); 352 if (adapter->hw.mac_type >= em_82540) { 353 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay", 354 "receive interrupt delay limit in usecs", 355 &adapter->rx_abs_int_delay, 356 E1000_REG_OFFSET(&adapter->hw, RADV), 357 
em_rx_abs_int_delay_dflt); 358 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay", 359 "transmit interrupt delay limit in usecs", 360 &adapter->tx_abs_int_delay, 361 E1000_REG_OFFSET(&adapter->hw, TADV), 362 em_tx_abs_int_delay_dflt); 363 } 364 365 /* 366 * Validate number of transmit and receive descriptors. It 367 * must not exceed hardware maximum, and must be multiple 368 * of E1000_DBA_ALIGN. 369 */ 370 if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 || 371 (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) || 372 (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) || 373 (em_txd < EM_MIN_TXD)) { 374 printf("em%d: Using %d TX descriptors instead of %d!\n", 375 adapter->unit, EM_DEFAULT_TXD, em_txd); 376 adapter->num_tx_desc = EM_DEFAULT_TXD; 377 } else 378 adapter->num_tx_desc = em_txd; 379 if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 || 380 (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) || 381 (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) || 382 (em_rxd < EM_MIN_RXD)) { 383 printf("em%d: Using %d RX descriptors instead of %d!\n", 384 adapter->unit, EM_DEFAULT_RXD, em_rxd); 385 adapter->num_rx_desc = EM_DEFAULT_RXD; 386 } else 387 adapter->num_rx_desc = em_rxd; 388 389 adapter->hw.autoneg = DO_AUTO_NEG; 390 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT; 391 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; 392 adapter->hw.tbi_compatibility_en = TRUE; 393 adapter->rx_buffer_len = EM_RXBUFFER_2048; 394 395 /* 396 * These parameters control the automatic generation(Tx) and 397 * response(Rx) to Ethernet PAUSE frames. 
398 */ 399 adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH; 400 adapter->hw.fc_low_water = FC_DEFAULT_LO_THRESH; 401 adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER; 402 adapter->hw.fc_send_xon = TRUE; 403 adapter->hw.fc = em_fc_full; 404 405 adapter->hw.phy_init_script = 1; 406 adapter->hw.phy_reset_disable = FALSE; 407 408#ifndef EM_MASTER_SLAVE 409 adapter->hw.master_slave = em_ms_hw_default; 410#else 411 adapter->hw.master_slave = EM_MASTER_SLAVE; 412#endif 413 /* 414 * Set the max frame size assuming standard ethernet 415 * sized frames 416 */ 417 adapter->hw.max_frame_size = 418 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 419 420 adapter->hw.min_frame_size = 421 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN; 422 423 /* 424 * This controls when hardware reports transmit completion 425 * status. 426 */ 427 adapter->hw.report_tx_early = 1; 428 429 430 if (em_allocate_pci_resources(adapter)) { 431 printf("em%d: Allocation of PCI resources failed\n", 432 adapter->unit); 433 error = ENXIO; 434 goto err_pci; 435 } 436 437 438 /* Initialize eeprom parameters */ 439 em_init_eeprom_params(&adapter->hw); 440 441 tsize = EM_ROUNDUP(adapter->num_tx_desc * 442 sizeof(struct em_tx_desc), 4096); 443 444 /* Allocate Transmit Descriptor ring */ 445 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { 446 printf("em%d: Unable to allocate tx_desc memory\n", 447 adapter->unit); 448 error = ENOMEM; 449 goto err_tx_desc; 450 } 451 adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr; 452 453 rsize = EM_ROUNDUP(adapter->num_rx_desc * 454 sizeof(struct em_rx_desc), 4096); 455 456 /* Allocate Receive Descriptor ring */ 457 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { 458 printf("em%d: Unable to allocate rx_desc memory\n", 459 adapter->unit); 460 error = ENOMEM; 461 goto err_rx_desc; 462 } 463 adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr; 464 465 /* Initialize the hardware */ 466 if 
(em_hardware_init(adapter)) { 467 printf("em%d: Unable to initialize the hardware\n", 468 adapter->unit); 469 error = EIO; 470 goto err_hw_init; 471 } 472 473 /* Copy the permanent MAC address out of the EEPROM */ 474 if (em_read_mac_addr(&adapter->hw) < 0) { 475 printf("em%d: EEPROM read error while reading mac address\n", 476 adapter->unit); 477 error = EIO; 478 goto err_mac_addr; 479 } 480 481 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) { 482 printf("em%d: Invalid mac address\n", adapter->unit); 483 error = EIO; 484 goto err_mac_addr; 485 } 486 487 /* Setup OS specific network interface */ 488 em_setup_interface(dev, adapter); 489 490 /* Initialize statistics */ 491 em_clear_hw_cntrs(&adapter->hw); 492 em_update_stats_counters(adapter); 493 adapter->hw.get_link_status = 1; 494 em_check_for_link(&adapter->hw); 495 496 if (bootverbose) { 497 /* Print the link status */ 498 if (adapter->link_active == 1) { 499 em_get_speed_and_duplex(&adapter->hw, 500 &adapter->link_speed, &adapter->link_duplex); 501 printf("em%d: Speed:%d Mbps Duplex:%s\n", 502 adapter->unit, 503 adapter->link_speed, 504 adapter->link_duplex == FULL_DUPLEX ? 
"Full" : 505 "Half"); 506 } else 507 printf("em%d: Speed:N/A Duplex:N/A\n", 508 adapter->unit); 509 } 510 511 /* Identify 82544 on PCIX */ 512 em_get_bus_info(&adapter->hw); 513 if(adapter->hw.bus_type == em_bus_type_pcix && 514 adapter->hw.mac_type == em_82544) { 515 adapter->pcix_82544 = TRUE; 516 } 517 else { 518 adapter->pcix_82544 = FALSE; 519 } 520 INIT_DEBUGOUT("em_attach: end"); 521 return(0); 522 523err_mac_addr: 524err_hw_init: 525 em_dma_free(adapter, &adapter->rxdma); 526err_rx_desc: 527 em_dma_free(adapter, &adapter->txdma); 528err_tx_desc: 529err_pci: 530 em_free_pci_resources(adapter); 531 EM_LOCK_DESTROY(adapter); 532 return(error); 533 534} 535 536/********************************************************************* 537 * Device removal routine 538 * 539 * The detach entry point is called when the driver is being removed. 540 * This routine stops the adapter and deallocates all the resources 541 * that were allocated for driver operation. 542 * 543 * return 0 on success, positive on failure 544 *********************************************************************/ 545 546static int 547em_detach(device_t dev) 548{ 549 struct adapter * adapter = device_get_softc(dev); 550 struct ifnet *ifp = adapter->ifp; 551 552 INIT_DEBUGOUT("em_detach: begin"); 553 554#ifdef DEVICE_POLLING 555 if (ifp->if_capenable & IFCAP_POLLING) 556 ether_poll_deregister(ifp); 557#endif 558 559 EM_LOCK(adapter); 560 adapter->in_detach = 1; 561 em_stop(adapter); 562 em_phy_hw_reset(&adapter->hw); 563 EM_UNLOCK(adapter); 564 ether_ifdetach(adapter->ifp); 565 566 em_free_pci_resources(adapter); 567 bus_generic_detach(dev); 568 if_free(ifp); 569 570 /* Free Transmit Descriptor ring */ 571 if (adapter->tx_desc_base) { 572 em_dma_free(adapter, &adapter->txdma); 573 adapter->tx_desc_base = NULL; 574 } 575 576 /* Free Receive Descriptor ring */ 577 if (adapter->rx_desc_base) { 578 em_dma_free(adapter, &adapter->rxdma); 579 adapter->rx_desc_base = NULL; 580 } 581 582 
EM_LOCK_DESTROY(adapter); 583 584 return(0); 585} 586 587/********************************************************************* 588 * 589 * Shutdown entry point 590 * 591 **********************************************************************/ 592 593static int 594em_shutdown(device_t dev) 595{ 596 struct adapter *adapter = device_get_softc(dev); 597 EM_LOCK(adapter); 598 em_stop(adapter); 599 EM_UNLOCK(adapter); 600 return(0); 601} 602 603 604/********************************************************************* 605 * Transmit entry point 606 * 607 * em_start is called by the stack to initiate a transmit. 608 * The driver will remain in this routine as long as there are 609 * packets to transmit and transmit resources are available. 610 * In case resources are not available stack is notified and 611 * the packet is requeued. 612 **********************************************************************/ 613 614static void 615em_start_locked(struct ifnet *ifp) 616{ 617 struct mbuf *m_head; 618 struct adapter *adapter = ifp->if_softc; 619 620 mtx_assert(&adapter->mtx, MA_OWNED); 621 622 if (!adapter->link_active) 623 return; 624 625 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 626 627 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 628 629 if (m_head == NULL) break; 630 631 /* 632 * em_encap() can modify our pointer, and or make it NULL on 633 * failure. In that event, we can't requeue. 
634 */ 635 if (em_encap(adapter, &m_head)) { 636 if (m_head == NULL) 637 break; 638 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 639 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 640 break; 641 } 642 643 /* Send a copy of the frame to the BPF listener */ 644 BPF_MTAP(ifp, m_head); 645 646 /* Set timeout in case hardware has problems transmitting */ 647 ifp->if_timer = EM_TX_TIMEOUT; 648 649 } 650 return; 651} 652 653static void 654em_start(struct ifnet *ifp) 655{ 656 struct adapter *adapter = ifp->if_softc; 657 658 EM_LOCK(adapter); 659 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 660 em_start_locked(ifp); 661 EM_UNLOCK(adapter); 662 return; 663} 664 665/********************************************************************* 666 * Ioctl entry point 667 * 668 * em_ioctl is called when the user wants to configure the 669 * interface. 670 * 671 * return 0 on success, positive on failure 672 **********************************************************************/ 673 674static int 675em_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 676{ 677 int mask, reinit, error = 0; 678 struct ifreq *ifr = (struct ifreq *) data; 679 struct adapter * adapter = ifp->if_softc; 680 681 if (adapter->in_detach) return(error); 682 683 switch (command) { 684 case SIOCSIFADDR: 685 case SIOCGIFADDR: 686 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)"); 687 ether_ioctl(ifp, command, data); 688 break; 689 case SIOCSIFMTU: 690 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); 691#ifndef __NO_STRICT_ALIGNMENT 692 if (ifr->ifr_mtu > ETHERMTU) { 693 /* 694 * XXX 695 * Due to the limitation of DMA engine, it needs fix-up 696 * code for strict alignment architectures. Disable 697 * jumbo frame until we have better solutions. 
698 */ 699 error = EINVAL; 700 } else 701#endif 702 if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN || \ 703 /* 82573 does not support jumbo frames */ 704 (adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) { 705 error = EINVAL; 706 } else { 707 EM_LOCK(adapter); 708 ifp->if_mtu = ifr->ifr_mtu; 709 adapter->hw.max_frame_size = 710 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 711 em_init_locked(adapter); 712 EM_UNLOCK(adapter); 713 } 714 break; 715 case SIOCSIFFLAGS: 716 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)"); 717 EM_LOCK(adapter); 718 if (ifp->if_flags & IFF_UP) { 719 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 720 em_init_locked(adapter); 721 } 722 723 em_disable_promisc(adapter); 724 em_set_promisc(adapter); 725 } else { 726 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 727 em_stop(adapter); 728 } 729 } 730 EM_UNLOCK(adapter); 731 break; 732 case SIOCADDMULTI: 733 case SIOCDELMULTI: 734 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); 735 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 736 EM_LOCK(adapter); 737 em_disable_intr(adapter); 738 em_set_multi(adapter); 739 if (adapter->hw.mac_type == em_82542_rev2_0) { 740 em_initialize_receive_unit(adapter); 741 } 742#ifdef DEVICE_POLLING 743 if (!(ifp->if_capenable & IFCAP_POLLING)) 744#endif 745 em_enable_intr(adapter); 746 EM_UNLOCK(adapter); 747 } 748 break; 749 case SIOCSIFMEDIA: 750 case SIOCGIFMEDIA: 751 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)"); 752 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 753 break; 754 case SIOCSIFCAP: 755 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); 756 reinit = 0; 757 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 758#ifdef DEVICE_POLLING 759 if (mask & IFCAP_POLLING) { 760 if (ifr->ifr_reqcap & IFCAP_POLLING) { 761 error = ether_poll_register(em_poll, ifp); 762 if (error) 763 return(error); 764 EM_LOCK(adapter); 765 em_disable_intr(adapter); 766 ifp->if_capenable |= IFCAP_POLLING; 767 
EM_UNLOCK(adapter); 768 } else { 769 error = ether_poll_deregister(ifp); 770 /* Enable interrupt even in error case */ 771 EM_LOCK(adapter); 772 em_enable_intr(adapter); 773 ifp->if_capenable &= ~IFCAP_POLLING; 774 EM_UNLOCK(adapter); 775 } 776 } 777#endif 778 if (mask & IFCAP_HWCSUM) { 779 ifp->if_capenable ^= IFCAP_HWCSUM; 780 reinit = 1; 781 } 782 if (mask & IFCAP_VLAN_HWTAGGING) { 783 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 784 reinit = 1; 785 } 786 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 787 em_init(adapter); 788 break; 789 default: 790 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command); 791 error = EINVAL; 792 } 793 794 return(error); 795} 796 797/********************************************************************* 798 * Watchdog entry point 799 * 800 * This routine is called whenever hardware quits transmitting. 801 * 802 **********************************************************************/ 803 804static void 805em_watchdog(struct ifnet *ifp) 806{ 807 struct adapter * adapter; 808 adapter = ifp->if_softc; 809 810 EM_LOCK(adapter); 811 /* If we are in this routine because of pause frames, then 812 * don't reset the hardware. 813 */ 814 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) { 815 ifp->if_timer = EM_TX_TIMEOUT; 816 EM_UNLOCK(adapter); 817 return; 818 } 819 820 if (em_check_for_link(&adapter->hw)) 821 printf("em%d: watchdog timeout -- resetting\n", adapter->unit); 822 823 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 824 adapter->watchdog_events++; 825 826 em_init_locked(adapter); 827 EM_UNLOCK(adapter); 828} 829 830/********************************************************************* 831 * Init entry point 832 * 833 * This routine is used in two ways. It is used by the stack as 834 * init entry point in network interface structure. It is also used 835 * by the driver as a hw/sw initialization routine to get to a 836 * consistent state. 
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

/*
 * Bring the adapter fully up: size the hardware packet buffer, program
 * the MAC address, (re)initialize the hardware, allocate and program the
 * TX/RX rings, restore promiscuous/multicast state and re-arm the
 * watchdog timer and interrupts.  Caller must hold the adapter lock.
 */
static void
em_init_locked(struct adapter * adapter)
{
	struct ifnet   *ifp;

	uint32_t	pba;
	ifp = adapter->ifp;

	INIT_DEBUGOUT("em_init: begin");

	mtx_assert(&adapter->mtx, MA_OWNED);

	/* Quiesce the hardware before reprogramming it */
	em_stop(adapter);

	/* Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	if(adapter->hw.mac_type < em_82547) {
		/* Total FIFO is 64K */
		/*
		 * NOTE(review): this branch keys off rx_buffer_len while the
		 * >= 82547 branch keys off hw.max_frame_size; confirm the
		 * asymmetry is intentional.
		 */
		if(adapter->rx_buffer_len > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	} else {
		/* Total FIFO is 40K */
		if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		} else {
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		}
		/* 82547 Tx FIFO bookkeeping used by the half-duplex hang
		 * workaround (em_82547_move_tail et al.) */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
	}
	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(adapter)) {
		printf("em%d: Unable to initialize the hardware\n",
		       adapter->unit);
		return;
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(adapter)) {
		printf("em%d: Could not setup transmit structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(adapter)) {
		printf("em%d: Could not setup receive structures\n",
		       adapter->unit);
		em_stop(adapter);
		return;
	}
	em_initialize_receive_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* HW checksum offload only exists on 82543 and newer */
	if (adapter->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}

	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy_reset_disable = TRUE;

	return;
}

/* Locking wrapper around em_init_locked(); used as ifp->if_init. */
static void
em_init(void *arg)
{
	struct adapter * adapter = arg;

	EM_LOCK(adapter);
	em_init_locked(adapter);
	EM_UNLOCK(adapter);
	return;
}


#ifdef DEVICE_POLLING
/*
 * Polling entry (lock held): on POLL_AND_CHECK_STATUS also check for
 * link changes, then drain RX/TX and kick the transmitter if work is
 * queued.
 */
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t reg_icr;

	mtx_assert(&adapter->mtx, MA_OWNED);

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR also acknowledges pending causes */
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer, adapter);
		}
	}
	em_process_receive_interrupts(adapter, count);
	em_clean_transmit_interrupts(adapter);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);
}

/* Locking wrapper for em_poll_locked(); skips work if not RUNNING. */
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;

	EM_LOCK(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		em_poll_locked(ifp, cmd, count);
	EM_UNLOCK(adapter);
}
#endif /* DEVICE_POLLING */

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/
static void
em_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	uint32_t	reg_icr;
	int		wantinit = 0;

	EM_LOCK(adapter);

	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* When polling is active the poll routine does all the work */
	if (ifp->if_capenable & IFCAP_POLLING) {
		EM_UNLOCK(adapter);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Loop until the (read-to-clear) ICR register drains to zero */
	for (;;) {
		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
		if (reg_icr == 0)
			break;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			em_process_receive_interrupts(adapter, -1);
			em_clean_transmit_interrupts(adapter);
		}

		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.get_link_status = 1;
			em_check_for_link(&adapter->hw);
			em_print_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_local_timer,
			    adapter);
		}

		if (reg_icr & E1000_ICR_RXO) {
			adapter->rx_overruns++;
			wantinit = 1;
		}
	}
	/*
	 * NOTE(review): wantinit is computed above but the reinit below is
	 * compiled out, so it is currently a dead store.
	 */
#if 0
	if (wantinit)
		em_init_locked(adapter);
#endif
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		em_start_locked(ifp);

	EM_UNLOCK(adapter);
	return;
}



/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter * adapter = ifp->if_softc;

	INIT_DEBUGOUT("em_media_status: begin");

	/* Refresh link state from the hardware before reporting */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed,
			    &adapter->link_duplex);
			adapter->link_active = 1;
		}
	} else {
		if (adapter->link_active == 1) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			adapter->link_active = 0;
		}
	}

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and stop */
	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		/* Copper: map speed/duplex onto ifmedia subtypes */
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
em_media_change(struct ifnet *ifp)
{
	struct adapter * adapter = ifp->if_softc;
	struct ifmedia  *ifm = &adapter->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	/* Translate the requested media into autoneg/forced settings */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit always negotiates; only full duplex is advertised */
		adapter->hw.autoneg = DO_AUTO_NEG;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_100_full;
		else
			adapter->hw.forced_speed_duplex = em_100_half;
		break;
	case IFM_10_T:
		adapter->hw.autoneg = FALSE;
		adapter->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.forced_speed_duplex = em_10_full;
		else
			adapter->hw.forced_speed_duplex = em_10_half;
		break;
	default:
		/* NOTE(review): falls through and still reinitializes;
		 * a return (EINVAL) here may be more appropriate — confirm. */
		printf("em%d: Unsupported media type\n", adapter->unit);
	}

	/* As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy_reset_disable = FALSE;

	em_init(adapter);

	return(0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	u_int32_t	txd_upper;
	u_int32_t	txd_lower, txd_used = 0, txd_saved = 0;
	int		i, j, error;

	struct mbuf	*m_head;

	/* For 82544 Workaround */
	DESC_ARRAY	desc_array;
	u_int32_t	array_elements;
	u_int32_t	counter;
	struct m_tag	*mtag;
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t	map;
	int		nsegs;
	struct em_buffer	*tx_buffer = NULL;
	struct em_tx_desc	*current_tx_desc = NULL;
	struct ifnet	*ifp = adapter->ifp;

	m_head = *m_headp;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_clean_transmit_interrupts(adapter);
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return(ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
		adapter->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (error);
	}
	KASSERT(nsegs != 0, ("em_encap: empty packet"));

	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (ENOBUFS);
	}


	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(adapter, m_head,
		    &txd_upper, &txd_lower);
	} else
		txd_upper = txd_lower = 0;


	/* Find out if we are in vlan mode */
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 *
	 * NOTE(review): this prepend/pullup happens AFTER the chain was
	 * loaded into "map" above, so the DMA mapping may describe the
	 * pre-prepend chain — verify against a later revision of this file.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		/* Remember the start so we can roll back on failure */
		txd_saved = i;
		txd_used = 0;
	}
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_destroy(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

	if (mtag != NULL) {
		/* Set the vlan id */
		current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/* Only the last descriptor's buffer entry keeps the mbuf and map;
	 * it is released when that descriptor is cleaned */
	tx_buffer->m_head = m_head;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac_type == em_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half-duplex: tail moves are gated by the FIFO
		 * workaround below instead of being written directly */
		em_82547_move_tail_locked(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
		}
	}

	return(0);
}

/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	/* Walk descriptors between the hardware tail and our software
	 * tail, advancing the hardware tail one whole packet at a time */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/* FIFO not safe yet: retry from the timer */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}

/* Callout wrapper: take the adapter lock and retry the tail move. */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;

	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}

/*
 * Return non-zero if queuing "len" more bytes would wrap the 82547 Tx
 * FIFO in half-duplex (the hang condition) and the FIFO could not be
 * reset; zero means it is safe to advance the tail.
 */
static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	/* On-wire FIFO usage is the payload rounded up plus header */
	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(adapter)) {
				return(0);
			}
			else {
				return(1);
			}
		}
	}

	return(0);
}

/* Advance the software Tx FIFO head by one packet, wrapping in-range. */
static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	}

	return;
}


/*
 * Reset the 82547 Tx FIFO pointers, but only when the transmit path is
 * completely quiescent (ring empty and FIFO drained).  Returns TRUE on
 * success, FALSE if the FIFO was still busy.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}

/*
 * Program RCTL unicast/multicast-promiscuous bits from the interface
 * flags, and decide whether the driver must insert VLAN headers itself
 * (hardware tagging is disabled while promiscuous).
 */
static void
em_set_promisc(struct adapter * adapter)
{

	u_int32_t	reg_rctl;
	struct ifnet	*ifp = adapter->ifp;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		/* Disable VLAN stripping in promiscuous mode
		 * This enables bridging of vlan tagged frames to occur
		 * and also allows vlan tags to be seen in tcpdump
		 */
		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			em_disable_vlans(adapter);
		adapter->em_insert_vlan_header = 1;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		adapter->em_insert_vlan_header = 0;
	} else
		adapter->em_insert_vlan_header = 0;

	return;
}

/* Clear promiscuous bits and restore hardware VLAN tagging if enabled. */
static void
em_disable_promisc(struct adapter * adapter)
{
	u_int32_t	reg_rctl;
	struct ifnet	*ifp = adapter->ifp;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	reg_rctl &= (~E1000_RCTL_UPE);
	reg_rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);
	adapter->em_insert_vlan_header = 0;

	return;
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/

static void
em_set_multi(struct adapter * adapter)
{
	u_int32_t reg_rctl = 0;
	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ifmultiaddr  *ifma;
	int mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/* 82542 rev2.0 errata: the receiver must be held in reset (and MWI
	 * cleared) while the multicast table is rewritten */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_clear_mwi(&adapter->hw);
		}
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	/* Table overflow: fall back to multicast-promiscuous */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);

	/* Take the 82542 rev2.0 receiver back out of reset */
	if (adapter->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_set_mwi(&adapter->hw);
		}
	}

	return;
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
em_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct adapter * adapter = arg;
	ifp = adapter->ifp;

	EM_LOCK(adapter);

	/* Once-a-second housekeeping: link check, stats, smartspeed */
	em_check_for_link(&adapter->hw);
	em_print_link_status(adapter);
	em_update_stats_counters(adapter);
	if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		em_print_hw_stats(adapter);
	}
	em_smartspeed(adapter);

	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, em_local_timer, adapter);

	EM_UNLOCK(adapter);
	return;
}

/*
 * Track link up/down transitions: cache speed/duplex, optionally log
 * the change (bootverbose), and notify the stack via
 * if_link_state_change().
 */
static void
em_print_link_status(struct adapter * adapter)
{
	struct ifnet *ifp = adapter->ifp;

	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
		if (adapter->link_active == 0) {
			em_get_speed_and_duplex(&adapter->hw,
			    &adapter->link_speed,
			    &adapter->link_duplex);
			if (bootverbose)
				printf("em%d: Link is up %d Mbps %s\n",
				       adapter->unit,
				       adapter->link_speed,
				       ((adapter->link_duplex == FULL_DUPLEX) ?
					"Full Duplex" : "Half Duplex"));
			adapter->link_active = 1;
			adapter->smartspeed = 0;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		if (adapter->link_active == 1) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			if (bootverbose)
				printf("em%d: Link is Down\n", adapter->unit);
			adapter->link_active = 0;
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
em_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter * adapter = arg;
	ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	INIT_DEBUGOUT("em_stop: begin");

	/* Silence interrupts, reset the MAC, cancel timers, free rings */
	em_disable_intr(adapter);
	em_reset_hw(&adapter->hw);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);


	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	return;
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
em_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		adapter->hw.pci_cmd_word |=
		    (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Identify the MAC */
	if (em_set_mac_type(&adapter->hw))
		printf("em%d: Unknown MAC Type\n", adapter->unit);

	/* These MACs require the extra PHY init script */
	if(adapter->hw.mac_type == em_82541 ||
	   adapter->hw.mac_type == em_82541_rev_2 ||
	   adapter->hw.mac_type == em_82547 ||
	   adapter->hw.mac_type == em_82547_rev_2)
		adapter->hw.phy_init_script = TRUE;

	return;
}

/*
 * Allocate the memory BAR, the IO BAR (82544 and newer) and the IRQ,
 * and hook up the interrupt handler.  Returns 0 or ENXIO; partially
 * allocated resources are released by em_free_pci_resources() from the
 * attach failure path.
 */
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int		val, rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;


	if (adapter->hw.mac_type > em_82543) {
		/* Figure out where our IO BAR is, skipping the high half
		 * of any 64-bit memory BARs along the way */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			printf("em%d: Unable to locate IO BAR\n", adapter->unit);
			return (ENXIO);
		}
		adapter->res_ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT,
		    &adapter->io_rid,
		    RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->res_ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->res_ioport);
	}

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    (void (*)(void *)) em_intr, adapter,
	    &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}

/* Release everything em_allocate_pci_resources() acquired; each
 * resource pointer is checked so this is safe on partial allocation. */
static void
em_free_pci_resources(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
		    adapter->int_handler_tag);
		bus_release_resource(dev, SYS_RES_IRQ, 0,
		    adapter->res_interrupt);
	}
	if (adapter->res_memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    adapter->res_memory);
	}

	if (adapter->res_ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
		    adapter->res_ioport);
	}
	return;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
static int
em_hardware_init(struct adapter * adapter)
{
	INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
		printf("em%d: The EEPROM Checksum Is Not Valid\n",
		       adapter->unit);
		return(EIO);
	}

	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
		printf("em%d: EEPROM read error while reading part number\n",
		       adapter->unit);
		return(EIO);
	}

	if (em_init_hw(&adapter->hw) < 0) {
		printf("em%d: Hardware Initialization Failed",
		       adapter->unit);
		return(EIO);
	}

	/* Seed the cached link state from the hardware */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
		adapter->link_active = 1;
	else
		adapter->link_active = 0;

	if (adapter->link_active) {
		em_get_speed_and_duplex(&adapter->hw,
		    &adapter->link_speed,
		    &adapter->link_duplex);
	} else {
		adapter->link_speed = 0;
		adapter->link_duplex = 0;
	}

	return(0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init =  em_init;
	ifp->if_softc =  adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac_addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Checksum offload only exists on 82543 and newer */
	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
	    em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}


/*********************************************************************
 *
 *  Workaround for SmartSpeed on 82541 and 82547 controllers
 *
 **********************************************************************/
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	/* Only relevant while link is down on an IGP PHY that is
	 * autonegotiating and advertising 1000FD */
	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Drop manual master/slave and restart
				 * autonegotiation */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&adapter->hw,
						 PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.autoneg &&
				   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
						    &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.autoneg &&
		   !em_phy_setup_autoneg(&adapter->hw) &&
		   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;

	return;
}


/*
 * Manage DMA'able memory.
2067 */ 2068static void 2069em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2070{ 2071 if (error) 2072 return; 2073 *(bus_addr_t*) arg = segs->ds_addr; 2074 return; 2075} 2076 2077static int 2078em_dma_malloc(struct adapter *adapter, bus_size_t size, 2079 struct em_dma_alloc *dma, int mapflags) 2080{ 2081 int r; 2082 2083 r = bus_dma_tag_create(NULL, /* parent */ 2084 PAGE_SIZE, 0, /* alignment, bounds */ 2085 BUS_SPACE_MAXADDR, /* lowaddr */ 2086 BUS_SPACE_MAXADDR, /* highaddr */ 2087 NULL, NULL, /* filter, filterarg */ 2088 size, /* maxsize */ 2089 1, /* nsegments */ 2090 size, /* maxsegsize */ 2091 BUS_DMA_ALLOCNOW, /* flags */ 2092 NULL, /* lockfunc */ 2093 NULL, /* lockarg */ 2094 &dma->dma_tag); 2095 if (r != 0) { 2096 printf("em%d: em_dma_malloc: bus_dma_tag_create failed; " 2097 "error %u\n", adapter->unit, r); 2098 goto fail_0; 2099 } 2100 2101 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 2102 BUS_DMA_NOWAIT, &dma->dma_map); 2103 if (r != 0) { 2104 printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; " 2105 "size %ju, error %d\n", adapter->unit, 2106 (uintmax_t)size, r); 2107 goto fail_2; 2108 } 2109 2110 dma->dma_paddr = 0; 2111 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 2112 size, 2113 em_dmamap_cb, 2114 &dma->dma_paddr, 2115 mapflags | BUS_DMA_NOWAIT); 2116 if (r != 0 || dma->dma_paddr == 0) { 2117 printf("em%d: em_dma_malloc: bus_dmamap_load failed; " 2118 "error %u\n", adapter->unit, r); 2119 goto fail_3; 2120 } 2121 2122 return (0); 2123 2124fail_3: 2125 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2126fail_2: 2127 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2128 bus_dma_tag_destroy(dma->dma_tag); 2129fail_0: 2130 dma->dma_map = NULL; 2131 dma->dma_tag = NULL; 2132 return (r); 2133} 2134 2135static void 2136em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2137{ 2138 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2139 bus_dmamem_free(dma->dma_tag, 
dma->dma_vaddr, dma->dma_map); 2140 bus_dma_tag_destroy(dma->dma_tag); 2141} 2142 2143 2144/********************************************************************* 2145 * 2146 * Allocate memory for tx_buffer structures. The tx_buffer stores all 2147 * the information needed to transmit a packet on the wire. 2148 * 2149 **********************************************************************/ 2150static int 2151em_allocate_transmit_structures(struct adapter * adapter) 2152{ 2153 if (!(adapter->tx_buffer_area = 2154 (struct em_buffer *) malloc(sizeof(struct em_buffer) * 2155 adapter->num_tx_desc, M_DEVBUF, 2156 M_NOWAIT))) { 2157 printf("em%d: Unable to allocate tx_buffer memory\n", 2158 adapter->unit); 2159 return ENOMEM; 2160 } 2161 2162 bzero(adapter->tx_buffer_area, 2163 sizeof(struct em_buffer) * adapter->num_tx_desc); 2164 2165 return 0; 2166} 2167 2168/********************************************************************* 2169 * 2170 * Allocate and initialize transmit structures. 2171 * 2172 **********************************************************************/ 2173static int 2174em_setup_transmit_structures(struct adapter * adapter) 2175{ 2176 /* 2177 * Setup DMA descriptor areas. 
2178 */ 2179 if (bus_dma_tag_create(NULL, /* parent */ 2180 1, 0, /* alignment, bounds */ 2181 BUS_SPACE_MAXADDR, /* lowaddr */ 2182 BUS_SPACE_MAXADDR, /* highaddr */ 2183 NULL, NULL, /* filter, filterarg */ 2184 MCLBYTES * 8, /* maxsize */ 2185 EM_MAX_SCATTER, /* nsegments */ 2186 MCLBYTES * 8, /* maxsegsize */ 2187 BUS_DMA_ALLOCNOW, /* flags */ 2188 NULL, /* lockfunc */ 2189 NULL, /* lockarg */ 2190 &adapter->txtag)) { 2191 printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit); 2192 return (ENOMEM); 2193 } 2194 2195 if (em_allocate_transmit_structures(adapter)) 2196 return (ENOMEM); 2197 2198 bzero((void *) adapter->tx_desc_base, 2199 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc); 2200 2201 adapter->next_avail_tx_desc = 0; 2202 adapter->oldest_used_tx_desc = 0; 2203 2204 /* Set number of descriptors available */ 2205 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2206 2207 /* Set checksum context */ 2208 adapter->active_checksum_context = OFFLOAD_NONE; 2209 2210 return (0); 2211} 2212 2213/********************************************************************* 2214 * 2215 * Enable transmit unit. 
 *
 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt delays and TCTL.
 * Also derives the per-descriptor command bits (adapter->txd_cmd)
 * used by the transmit path.  Called from init, with the ring
 * already allocated by em_setup_transmit_structures().
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t reg_tctl;
	u_int32_t reg_tipg = 0;
	u_int64_t bus_addr;

	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);


	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
	case em_82542_rev2_1:
		reg_tipg = DEFAULT_82542_TIPG_IPGT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* 82543 and later: IPGT depends on the media type. */
		if (adapter->hw.media_type == em_media_type_fiber)
			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
				adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	if (adapter->hw.mac_type >= em_82573)
		reg_tctl |= E1000_TCTL_MULR;
	/* NOTE(review): this treats link_duplex == 1 as full duplex;
	 * confirm against the value stored by the link-state update
	 * path before relying on the collision distance chosen here. */
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	/* Request a descriptor-done interrupt delay only if configured */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 *  Unloads/destroys per-packet DMA maps, frees any still-queued
 *  mbufs, the tx_buffer array and the mbuf DMA tag.  Safe to call
 *  with partially initialized state (all steps are NULL-guarded).
 *
 **********************************************************************/
static void
em_free_transmit_structures(struct adapter * adapter)
{
	struct em_buffer   *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		tx_buffer = adapter->tx_buffer_area;
		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->m_head != NULL) {
				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
				m_freem(tx_buffer->m_head);
			}
			tx_buffer->m_head = NULL;
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		free(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). We change the
 *  context only if the protocol type changes.
2327 * 2328 **********************************************************************/ 2329static void 2330em_transmit_checksum_setup(struct adapter * adapter, 2331 struct mbuf *mp, 2332 u_int32_t *txd_upper, 2333 u_int32_t *txd_lower) 2334{ 2335 struct em_context_desc *TXD; 2336 struct em_buffer *tx_buffer; 2337 int curr_txd; 2338 2339 if (mp->m_pkthdr.csum_flags) { 2340 2341 if (mp->m_pkthdr.csum_flags & CSUM_TCP) { 2342 *txd_upper = E1000_TXD_POPTS_TXSM << 8; 2343 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2344 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) 2345 return; 2346 else 2347 adapter->active_checksum_context = OFFLOAD_TCP_IP; 2348 2349 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { 2350 *txd_upper = E1000_TXD_POPTS_TXSM << 8; 2351 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2352 if (adapter->active_checksum_context == OFFLOAD_UDP_IP) 2353 return; 2354 else 2355 adapter->active_checksum_context = OFFLOAD_UDP_IP; 2356 } else { 2357 *txd_upper = 0; 2358 *txd_lower = 0; 2359 return; 2360 } 2361 } else { 2362 *txd_upper = 0; 2363 *txd_lower = 0; 2364 return; 2365 } 2366 2367 /* If we reach this point, the checksum offload context 2368 * needs to be reset. 
2369 */ 2370 curr_txd = adapter->next_avail_tx_desc; 2371 tx_buffer = &adapter->tx_buffer_area[curr_txd]; 2372 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd]; 2373 2374 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN; 2375 TXD->lower_setup.ip_fields.ipcso = 2376 ETHER_HDR_LEN + offsetof(struct ip, ip_sum); 2377 TXD->lower_setup.ip_fields.ipcse = 2378 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1); 2379 2380 TXD->upper_setup.tcp_fields.tucss = 2381 ETHER_HDR_LEN + sizeof(struct ip); 2382 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2383 2384 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { 2385 TXD->upper_setup.tcp_fields.tucso = 2386 ETHER_HDR_LEN + sizeof(struct ip) + 2387 offsetof(struct tcphdr, th_sum); 2388 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { 2389 TXD->upper_setup.tcp_fields.tucso = 2390 ETHER_HDR_LEN + sizeof(struct ip) + 2391 offsetof(struct udphdr, uh_sum); 2392 } 2393 2394 TXD->tcp_seg_setup.data = htole32(0); 2395 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT); 2396 2397 tx_buffer->m_head = NULL; 2398 2399 if (++curr_txd == adapter->num_tx_desc) 2400 curr_txd = 0; 2401 2402 adapter->num_tx_desc_avail--; 2403 adapter->next_avail_tx_desc = curr_txd; 2404 2405 return; 2406} 2407 2408/********************************************************************** 2409 * 2410 * Examine each tx_buffer in the used queue. If the hardware is done 2411 * processing the packet then free associated resources. The 2412 * tx_buffer is put back on the free queue. 
2413 * 2414 **********************************************************************/ 2415static void 2416em_clean_transmit_interrupts(struct adapter * adapter) 2417{ 2418 int i, num_avail; 2419 struct em_buffer *tx_buffer; 2420 struct em_tx_desc *tx_desc; 2421 struct ifnet *ifp = adapter->ifp; 2422 2423 mtx_assert(&adapter->mtx, MA_OWNED); 2424 2425 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2426 return; 2427 2428 num_avail = adapter->num_tx_desc_avail; 2429 i = adapter->oldest_used_tx_desc; 2430 2431 tx_buffer = &adapter->tx_buffer_area[i]; 2432 tx_desc = &adapter->tx_desc_base[i]; 2433 2434 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2435 BUS_DMASYNC_POSTREAD); 2436 while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2437 2438 tx_desc->upper.data = 0; 2439 num_avail++; 2440 2441 if (tx_buffer->m_head) { 2442 ifp->if_opackets++; 2443 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2444 bus_dmamap_destroy(adapter->txtag, tx_buffer->map); 2445 2446 m_freem(tx_buffer->m_head); 2447 tx_buffer->m_head = NULL; 2448 } 2449 2450 if (++i == adapter->num_tx_desc) 2451 i = 0; 2452 2453 tx_buffer = &adapter->tx_buffer_area[i]; 2454 tx_desc = &adapter->tx_desc_base[i]; 2455 } 2456 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2457 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2458 2459 adapter->oldest_used_tx_desc = i; 2460 2461 /* 2462 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack 2463 * that it is OK to send packets. 2464 * If there are no pending descriptors, clear the timeout. Otherwise, 2465 * if some descriptors have been freed, restart the timeout. 
2466 */ 2467 if (num_avail > EM_TX_CLEANUP_THRESHOLD) { 2468 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2469 if (num_avail == adapter->num_tx_desc) 2470 ifp->if_timer = 0; 2471 else if (num_avail == adapter->num_tx_desc_avail) 2472 ifp->if_timer = EM_TX_TIMEOUT; 2473 } 2474 adapter->num_tx_desc_avail = num_avail; 2475 return; 2476} 2477 2478/********************************************************************* 2479 * 2480 * Get a buffer from system mbuf buffer pool. 2481 * 2482 **********************************************************************/ 2483static int 2484em_get_buf(int i, struct adapter *adapter, 2485 struct mbuf *nmp) 2486{ 2487 register struct mbuf *mp = nmp; 2488 struct em_buffer *rx_buffer; 2489 struct ifnet *ifp; 2490 bus_addr_t paddr; 2491 int error; 2492 2493 ifp = adapter->ifp; 2494 2495 if (mp == NULL) { 2496 mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2497 if (mp == NULL) { 2498 adapter->mbuf_cluster_failed++; 2499 return(ENOBUFS); 2500 } 2501 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 2502 } else { 2503 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 2504 mp->m_data = mp->m_ext.ext_buf; 2505 mp->m_next = NULL; 2506 } 2507 2508 if (ifp->if_mtu <= ETHERMTU) { 2509 m_adj(mp, ETHER_ALIGN); 2510 } 2511 2512 rx_buffer = &adapter->rx_buffer_area[i]; 2513 2514 /* 2515 * Using memory from the mbuf cluster pool, invoke the 2516 * bus_dma machinery to arrange the memory mapping. 2517 */ 2518 paddr = 0; 2519 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map, 2520 mtod(mp, void *), mp->m_len, 2521 em_dmamap_cb, &paddr, 0); 2522 if (error || paddr == 0) { 2523 m_free(mp); 2524 return(error); 2525 } 2526 rx_buffer->m_head = mp; 2527 adapter->rx_desc_base[i].buffer_addr = htole64(paddr); 2528 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD | 2529 BUS_DMASYNC_PREWRITE); 2530 2531 return(0); 2532} 2533 2534/********************************************************************* 2535 * 2536 * Allocate memory for rx_buffer structures. 
Since we use one 2537 * rx_buffer per received packet, the maximum number of rx_buffer's 2538 * that we'll need is equal to the number of receive descriptors 2539 * that we've allocated. 2540 * 2541 **********************************************************************/ 2542static int 2543em_allocate_receive_structures(struct adapter * adapter) 2544{ 2545 int i, error; 2546 struct em_buffer *rx_buffer; 2547 2548 if (!(adapter->rx_buffer_area = 2549 (struct em_buffer *) malloc(sizeof(struct em_buffer) * 2550 adapter->num_rx_desc, M_DEVBUF, 2551 M_NOWAIT))) { 2552 printf("em%d: Unable to allocate rx_buffer memory\n", 2553 adapter->unit); 2554 return(ENOMEM); 2555 } 2556 2557 bzero(adapter->rx_buffer_area, 2558 sizeof(struct em_buffer) * adapter->num_rx_desc); 2559 2560 error = bus_dma_tag_create(NULL, /* parent */ 2561 1, 0, /* alignment, bounds */ 2562 BUS_SPACE_MAXADDR, /* lowaddr */ 2563 BUS_SPACE_MAXADDR, /* highaddr */ 2564 NULL, NULL, /* filter, filterarg */ 2565 MCLBYTES, /* maxsize */ 2566 1, /* nsegments */ 2567 MCLBYTES, /* maxsegsize */ 2568 BUS_DMA_ALLOCNOW, /* flags */ 2569 NULL, /* lockfunc */ 2570 NULL, /* lockarg */ 2571 &adapter->rxtag); 2572 if (error != 0) { 2573 printf("em%d: em_allocate_receive_structures: " 2574 "bus_dma_tag_create failed; error %u\n", 2575 adapter->unit, error); 2576 goto fail_0; 2577 } 2578 2579 rx_buffer = adapter->rx_buffer_area; 2580 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 2581 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, 2582 &rx_buffer->map); 2583 if (error != 0) { 2584 printf("em%d: em_allocate_receive_structures: " 2585 "bus_dmamap_create failed; error %u\n", 2586 adapter->unit, error); 2587 goto fail_1; 2588 } 2589 } 2590 2591 for (i = 0; i < adapter->num_rx_desc; i++) { 2592 error = em_get_buf(i, adapter, NULL); 2593 if (error != 0) { 2594 adapter->rx_buffer_area[i].m_head = NULL; 2595 adapter->rx_desc_base[i].buffer_addr = 0; 2596 return(error); 2597 } 2598 } 2599 
bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, 2600 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2601 2602 return(0); 2603 2604fail_1: 2605 bus_dma_tag_destroy(adapter->rxtag); 2606fail_0: 2607 adapter->rxtag = NULL; 2608 free(adapter->rx_buffer_area, M_DEVBUF); 2609 adapter->rx_buffer_area = NULL; 2610 return (error); 2611} 2612 2613/********************************************************************* 2614 * 2615 * Allocate and initialize receive structures. 2616 * 2617 **********************************************************************/ 2618static int 2619em_setup_receive_structures(struct adapter * adapter) 2620{ 2621 bzero((void *) adapter->rx_desc_base, 2622 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc); 2623 2624 if (em_allocate_receive_structures(adapter)) 2625 return ENOMEM; 2626 2627 /* Setup our descriptor pointers */ 2628 adapter->next_rx_desc_to_check = 0; 2629 return(0); 2630} 2631 2632/********************************************************************* 2633 * 2634 * Enable receive unit. 2635 * 2636 **********************************************************************/ 2637static void 2638em_initialize_receive_unit(struct adapter * adapter) 2639{ 2640 u_int32_t reg_rctl; 2641 u_int32_t reg_rxcsum; 2642 struct ifnet *ifp; 2643 u_int64_t bus_addr; 2644 2645 INIT_DEBUGOUT("em_initialize_receive_unit: begin"); 2646 ifp = adapter->ifp; 2647 2648 /* Make sure receives are disabled while setting up the descriptor ring */ 2649 E1000_WRITE_REG(&adapter->hw, RCTL, 0); 2650 2651 /* Set the Receive Delay Timer Register */ 2652 E1000_WRITE_REG(&adapter->hw, RDTR, 2653 adapter->rx_int_delay.value | E1000_RDT_FPDB); 2654 2655 if(adapter->hw.mac_type >= em_82540) { 2656 E1000_WRITE_REG(&adapter->hw, RADV, 2657 adapter->rx_abs_int_delay.value); 2658 2659 /* Set the interrupt throttling rate. 
Value is calculated 2660 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */ 2661#define MAX_INTS_PER_SEC 8000 2662#define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256) 2663 E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR); 2664 } 2665 2666 /* Setup the Base and Length of the Rx Descriptor Ring */ 2667 bus_addr = adapter->rxdma.dma_paddr; 2668 E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr); 2669 E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32)); 2670 E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc * 2671 sizeof(struct em_rx_desc)); 2672 2673 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 2674 E1000_WRITE_REG(&adapter->hw, RDH, 0); 2675 E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1); 2676 2677 /* Setup the Receive Control Register */ 2678 reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2679 E1000_RCTL_RDMTS_HALF | 2680 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 2681 2682 if (adapter->hw.tbi_compatibility_on == TRUE) 2683 reg_rctl |= E1000_RCTL_SBP; 2684 2685 2686 switch (adapter->rx_buffer_len) { 2687 default: 2688 case EM_RXBUFFER_2048: 2689 reg_rctl |= E1000_RCTL_SZ_2048; 2690 break; 2691 case EM_RXBUFFER_4096: 2692 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE; 2693 break; 2694 case EM_RXBUFFER_8192: 2695 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE; 2696 break; 2697 case EM_RXBUFFER_16384: 2698 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE; 2699 break; 2700 } 2701 2702 if (ifp->if_mtu > ETHERMTU) 2703 reg_rctl |= E1000_RCTL_LPE; 2704 2705 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 2706 if ((adapter->hw.mac_type >= em_82543) && 2707 (ifp->if_capenable & IFCAP_RXCSUM)) { 2708 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 2709 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); 2710 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum); 2711 } 2712 2713 /* Enable Receives */ 2714 
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 *  Unloads/destroys per-descriptor DMA maps, frees any mbufs still
 *  attached, then the rx_buffer array and the rx DMA tag.  Every
 *  step is NULL-guarded, so partial initialization is safe.
 *
 **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer   *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL) {
				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL)
				m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
static void
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet        *ifp;
	struct mbuf         *mp;
	/* accept_frame/eop are per-descriptor flags; fmp/lmp in the
	 * adapter carry a partially assembled multi-descriptor packet
	 * across loop iterations (and across calls). */
	u_int8_t            accept_frame = 0;
	u_int8_t            eop = 0;
	u_int16_t           len, desc_len, prev_len_adj;
	int                 i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	mtx_assert(&adapter->mtx, MA_OWNED);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* DD clear on the next descriptor: nothing has arrived. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return;
	}

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Only whole packets count against the budget. */
			count--;
			eop = 1;
			/* Strip the CRC; if this last fragment is shorter
			 * than the CRC, the excess must be trimmed from
			 * the previous mbuf in the chain instead. */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			}
			else {
				len = desc_len - ETHER_CRC_LEN;
			}
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u_int8_t            last_byte;
			u_int32_t           pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			/* TBI (fiber) workaround: frames ending in a
			 * carrier-extend symbol are acceptable despite
			 * the error bits; adjust stats and drop the
			 * trailing symbol byte. */
			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
			    current_desc->errors,
			    pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
				    &adapter->stats,
				    pkt_len,
				    adapter->hw.mac_addr);
				if (len > 0) len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Try to replace the cluster; on failure recycle
			 * the current mbuf into the ring and drop the
			 * whole partially assembled packet. */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	 /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if we
				 * received less than 4 bytes in the last descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
				    adapter->fmp);
				/* The 4th VLAN_INPUT_TAG argument is the
				 * statement the macro executes when it
				 * consumes/drops the mbuf itself. */
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (le16toh(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK),
					    adapter->fmp = NULL);

				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
		E1000_WRITE_REG(&adapter->hw, RDT, i);

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc)
			i = 0;
		if (m != NULL) {
			/* Drop the lock around if_input(); re-read the
			 * ring index afterwards since the stack may have
			 * re-entered the driver while unlocked. */
			adapter->next_rx_desc_to_check = i;
			EM_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_LOCK(adapter);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;
	return;
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
em_receive_checksum(struct adapter *adapter,
		    struct em_rx_desc *rx_desc,
		    struct mbuf *mp)
{
	/* 82543 or newer only */
	if ((adapter->hw.mac_type < em_82543) ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}

	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}

	return;
}


/* Enable 802.1Q hardware VLAN tagging/stripping. */
static void
em_enable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl |= E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

	return;
}

/* Disable 802.1Q hardware VLAN tagging/stripping. */
static void
em_disable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

	return;
}

/* Unmask the interrupt causes the driver handles. */
static void
em_enable_intr(struct adapter * adapter)
{
	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
	return;
}

static void
em_disable_intr(struct adapter *adapter)
{
	/*
	 * The first version of 82542 had an errata where, when link was
	 * forced, it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then
	 * the driver would unforce the link.  That code is in the ISR.
	 * For it to work correctly the Sequence error interrupt has to
	 * stay enabled all the time.
3010 */ 3011 3012 if (adapter->hw.mac_type == em_82542_rev2_0) 3013 E1000_WRITE_REG(&adapter->hw, IMC, 3014 (0xffffffff & ~E1000_IMC_RXSEQ)); 3015 else 3016 E1000_WRITE_REG(&adapter->hw, IMC, 3017 0xffffffff); 3018 return; 3019} 3020 3021static int 3022em_is_valid_ether_addr(u_int8_t *addr) 3023{ 3024 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 3025 3026 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 3027 return (FALSE); 3028 } 3029 3030 return(TRUE); 3031} 3032 3033void 3034em_write_pci_cfg(struct em_hw *hw, 3035 uint32_t reg, 3036 uint16_t *value) 3037{ 3038 pci_write_config(((struct em_osdep *)hw->back)->dev, reg, 3039 *value, 2); 3040} 3041 3042void 3043em_read_pci_cfg(struct em_hw *hw, uint32_t reg, 3044 uint16_t *value) 3045{ 3046 *value = pci_read_config(((struct em_osdep *)hw->back)->dev, 3047 reg, 2); 3048 return; 3049} 3050 3051void 3052em_pci_set_mwi(struct em_hw *hw) 3053{ 3054 pci_write_config(((struct em_osdep *)hw->back)->dev, 3055 PCIR_COMMAND, 3056 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2); 3057 return; 3058} 3059 3060void 3061em_pci_clear_mwi(struct em_hw *hw) 3062{ 3063 pci_write_config(((struct em_osdep *)hw->back)->dev, 3064 PCIR_COMMAND, 3065 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2); 3066 return; 3067} 3068 3069/********************************************************************* 3070* 82544 Coexistence issue workaround. 3071* There are 2 issues. 3072* 1. Transmit Hang issue. 3073* To detect this issue, following equation can be used... 3074* SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 3075* If SUM[3:0] is in between 1 to 4, we will have this issue. 3076* 3077* 2. DAC issue. 3078* To detect this issue, following equation can be used... 3079* SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 3080* If SUM[3:0] is in between 9 to c, we will have this issue. 
3081* 3082* 3083* WORKAROUND: 3084* Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC) 3085* 3086*** *********************************************************************/ 3087static u_int32_t 3088em_fill_descriptors (bus_addr_t address, 3089 u_int32_t length, 3090 PDESC_ARRAY desc_array) 3091{ 3092 /* Since issue is sensitive to length and address.*/ 3093 /* Let us first check the address...*/ 3094 u_int32_t safe_terminator; 3095 if (length <= 4) { 3096 desc_array->descriptor[0].address = address; 3097 desc_array->descriptor[0].length = length; 3098 desc_array->elements = 1; 3099 return desc_array->elements; 3100 } 3101 safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF); 3102 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */ 3103 if (safe_terminator == 0 || 3104 (safe_terminator > 4 && 3105 safe_terminator < 9) || 3106 (safe_terminator > 0xC && 3107 safe_terminator <= 0xF)) { 3108 desc_array->descriptor[0].address = address; 3109 desc_array->descriptor[0].length = length; 3110 desc_array->elements = 1; 3111 return desc_array->elements; 3112 } 3113 3114 desc_array->descriptor[0].address = address; 3115 desc_array->descriptor[0].length = length - 4; 3116 desc_array->descriptor[1].address = address + (length - 4); 3117 desc_array->descriptor[1].length = 4; 3118 desc_array->elements = 2; 3119 return desc_array->elements; 3120} 3121 3122/********************************************************************** 3123 * 3124 * Update the board statistics counters. 
3125 * 3126 **********************************************************************/ 3127static void 3128em_update_stats_counters(struct adapter *adapter) 3129{ 3130 struct ifnet *ifp; 3131 3132 if(adapter->hw.media_type == em_media_type_copper || 3133 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 3134 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS); 3135 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC); 3136 } 3137 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS); 3138 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC); 3139 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC); 3140 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL); 3141 3142 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC); 3143 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL); 3144 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC); 3145 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC); 3146 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC); 3147 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC); 3148 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC); 3149 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC); 3150 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC); 3151 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC); 3152 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64); 3153 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127); 3154 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255); 3155 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511); 3156 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023); 3157 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522); 3158 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC); 3159 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC); 3160 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC); 3161 
adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC); 3162 3163 /* For the 64-bit byte counters the low dword must be read first. */ 3164 /* Both registers clear on the read of the high dword */ 3165 3166 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 3167 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH); 3168 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL); 3169 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH); 3170 3171 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC); 3172 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC); 3173 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC); 3174 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC); 3175 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC); 3176 3177 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL); 3178 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH); 3179 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL); 3180 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH); 3181 3182 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR); 3183 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT); 3184 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64); 3185 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127); 3186 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255); 3187 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511); 3188 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023); 3189 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522); 3190 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC); 3191 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC); 3192 3193 if (adapter->hw.mac_type >= em_82543) { 3194 adapter->stats.algnerrc += 3195 E1000_READ_REG(&adapter->hw, ALGNERRC); 3196 adapter->stats.rxerrc += 3197 E1000_READ_REG(&adapter->hw, RXERRC); 3198 adapter->stats.tncrs += 3199 E1000_READ_REG(&adapter->hw, TNCRS); 3200 
adapter->stats.cexterr += 3201 E1000_READ_REG(&adapter->hw, CEXTERR); 3202 adapter->stats.tsctc += 3203 E1000_READ_REG(&adapter->hw, TSCTC); 3204 adapter->stats.tsctfc += 3205 E1000_READ_REG(&adapter->hw, TSCTFC); 3206 } 3207 ifp = adapter->ifp; 3208 3209 ifp->if_collisions = adapter->stats.colc; 3210 3211 /* Rx Errors */ 3212 ifp->if_ierrors = 3213 adapter->dropped_pkts + 3214 adapter->stats.rxerrc + 3215 adapter->stats.crcerrs + 3216 adapter->stats.algnerrc + 3217 adapter->stats.rlec + 3218 adapter->stats.mpc + adapter->stats.cexterr; 3219 3220 /* Tx Errors */ 3221 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol + 3222 adapter->watchdog_events; 3223 3224} 3225 3226 3227/********************************************************************** 3228 * 3229 * This routine is called only when em_display_debug_stats is enabled. 3230 * This routine provides a way to take a look at important statistics 3231 * maintained by the driver and hardware. 3232 * 3233 **********************************************************************/ 3234static void 3235em_print_debug_info(struct adapter *adapter) 3236{ 3237 int unit = adapter->unit; 3238 uint8_t *hw_addr = adapter->hw.hw_addr; 3239 3240 printf("em%d: Adapter hardware address = %p \n", unit, hw_addr); 3241 printf("em%d:CTRL = 0x%x\n", unit, 3242 E1000_READ_REG(&adapter->hw, CTRL)); 3243 printf("em%d:RCTL = 0x%x PS=(0x8402)\n", unit, 3244 E1000_READ_REG(&adapter->hw, RCTL)); 3245 printf("em%d:tx_int_delay = %d, tx_abs_int_delay = %d\n", unit, 3246 E1000_READ_REG(&adapter->hw, TIDV), 3247 E1000_READ_REG(&adapter->hw, TADV)); 3248 printf("em%d:rx_int_delay = %d, rx_abs_int_delay = %d\n", unit, 3249 E1000_READ_REG(&adapter->hw, RDTR), 3250 E1000_READ_REG(&adapter->hw, RADV)); 3251 printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit, 3252 (long long)adapter->tx_fifo_wrk_cnt, 3253 (long long)adapter->tx_fifo_reset_cnt); 3254 printf("em%d: hw tdh = %d, hw tdt = %d\n", unit, 3255 
E1000_READ_REG(&adapter->hw, TDH), 3256 E1000_READ_REG(&adapter->hw, TDT)); 3257 printf("em%d: Num Tx descriptors avail = %d\n", unit, 3258 adapter->num_tx_desc_avail); 3259 printf("em%d: Tx Descriptors not avail1 = %ld\n", unit, 3260 adapter->no_tx_desc_avail1); 3261 printf("em%d: Tx Descriptors not avail2 = %ld\n", unit, 3262 adapter->no_tx_desc_avail2); 3263 printf("em%d: Std mbuf failed = %ld\n", unit, 3264 adapter->mbuf_alloc_failed); 3265 printf("em%d: Std mbuf cluster failed = %ld\n", unit, 3266 adapter->mbuf_cluster_failed); 3267 printf("em%d: Driver dropped packets = %ld\n", unit, 3268 adapter->dropped_pkts); 3269 3270 return; 3271} 3272 3273static void 3274em_print_hw_stats(struct adapter *adapter) 3275{ 3276 int unit = adapter->unit; 3277 3278 printf("em%d: Excessive collisions = %lld\n", unit, 3279 (long long)adapter->stats.ecol); 3280 printf("em%d: Symbol errors = %lld\n", unit, 3281 (long long)adapter->stats.symerrs); 3282 printf("em%d: Sequence errors = %lld\n", unit, 3283 (long long)adapter->stats.sec); 3284 printf("em%d: Defer count = %lld\n", unit, 3285 (long long)adapter->stats.dc); 3286 3287 printf("em%d: Missed Packets = %lld\n", unit, 3288 (long long)adapter->stats.mpc); 3289 printf("em%d: Receive No Buffers = %lld\n", unit, 3290 (long long)adapter->stats.rnbc); 3291 printf("em%d: Receive length errors = %lld\n", unit, 3292 (long long)adapter->stats.rlec); 3293 printf("em%d: Receive errors = %lld\n", unit, 3294 (long long)adapter->stats.rxerrc); 3295 printf("em%d: Crc errors = %lld\n", unit, 3296 (long long)adapter->stats.crcerrs); 3297 printf("em%d: Alignment errors = %lld\n", unit, 3298 (long long)adapter->stats.algnerrc); 3299 printf("em%d: Carrier extension errors = %lld\n", unit, 3300 (long long)adapter->stats.cexterr); 3301 printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns); 3302 printf("em%d: watchdog timeouts = %ld\n", unit, 3303 adapter->watchdog_events); 3304 3305 printf("em%d: XON Rcvd = %lld\n", unit, 3306 (long 
long)adapter->stats.xonrxc); 3307 printf("em%d: XON Xmtd = %lld\n", unit, 3308 (long long)adapter->stats.xontxc); 3309 printf("em%d: XOFF Rcvd = %lld\n", unit, 3310 (long long)adapter->stats.xoffrxc); 3311 printf("em%d: XOFF Xmtd = %lld\n", unit, 3312 (long long)adapter->stats.xofftxc); 3313 3314 printf("em%d: Good Packets Rcvd = %lld\n", unit, 3315 (long long)adapter->stats.gprc); 3316 printf("em%d: Good Packets Xmtd = %lld\n", unit, 3317 (long long)adapter->stats.gptc); 3318 3319 return; 3320} 3321 3322static int 3323em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3324{ 3325 int error; 3326 int result; 3327 struct adapter *adapter; 3328 3329 result = -1; 3330 error = sysctl_handle_int(oidp, &result, 0, req); 3331 3332 if (error || !req->newptr) 3333 return (error); 3334 3335 if (result == 1) { 3336 adapter = (struct adapter *)arg1; 3337 em_print_debug_info(adapter); 3338 } 3339 3340 return error; 3341} 3342 3343 3344static int 3345em_sysctl_stats(SYSCTL_HANDLER_ARGS) 3346{ 3347 int error; 3348 int result; 3349 struct adapter *adapter; 3350 3351 result = -1; 3352 error = sysctl_handle_int(oidp, &result, 0, req); 3353 3354 if (error || !req->newptr) 3355 return (error); 3356 3357 if (result == 1) { 3358 adapter = (struct adapter *)arg1; 3359 em_print_hw_stats(adapter); 3360 } 3361 3362 return error; 3363} 3364 3365static int 3366em_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 3367{ 3368 struct em_int_delay_info *info; 3369 struct adapter *adapter; 3370 u_int32_t regval; 3371 int error; 3372 int usecs; 3373 int ticks; 3374 3375 info = (struct em_int_delay_info *)arg1; 3376 usecs = info->value; 3377 error = sysctl_handle_int(oidp, &usecs, 0, req); 3378 if (error != 0 || req->newptr == NULL) 3379 return error; 3380 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535)) 3381 return EINVAL; 3382 info->value = usecs; 3383 ticks = E1000_USECS_TO_TICKS(usecs); 3384 3385 adapter = info->adapter; 3386 3387 EM_LOCK(adapter); 3388 regval = E1000_READ_OFFSET(&adapter->hw, info->offset); 3389 
regval = (regval & ~0xffff) | (ticks & 0xffff); 3390 /* Handle a few special cases. */ 3391 switch (info->offset) { 3392 case E1000_RDTR: 3393 case E1000_82542_RDTR: 3394 regval |= E1000_RDT_FPDB; 3395 break; 3396 case E1000_TIDV: 3397 case E1000_82542_TIDV: 3398 if (ticks == 0) { 3399 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; 3400 /* Don't write 0 into the TIDV register. */ 3401 regval++; 3402 } else 3403 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 3404 break; 3405 } 3406 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); 3407 EM_UNLOCK(adapter); 3408 return 0; 3409} 3410 3411static void 3412em_add_int_delay_sysctl(struct adapter *adapter, const char *name, 3413 const char *description, struct em_int_delay_info *info, 3414 int offset, int value) 3415{ 3416 info->adapter = adapter; 3417 info->offset = offset; 3418 info->value = value; 3419 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), 3420 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 3421 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, 3422 info, 0, em_sysctl_int_delay, "I", description); 3423} 3424