/* if_em.c revision 151494 */
1/************************************************************************** 2 3Copyright (c) 2001-2005, Intel Corporation 4All rights reserved. 5 6Redistribution and use in source and binary forms, with or without 7modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/

/*$FreeBSD: head/sys/dev/em/if_em.c 151494 2005-10-20 08:46:43Z glebius $*/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <dev/em/if_em.h>

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             em_display_debug_stats = 0;

/*********************************************************************
 *  Linked list of board private structures for all NICs found
 *********************************************************************/

struct adapter *em_adapter_list = NULL;


/*********************************************************************
 *  Driver version
 *********************************************************************/

char em_driver_version[] = "2.1.7";


/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into em_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static em_vendor_info_t em_vendor_info_array[] =
{
    /* Intel(R) PRO/1000 Network Connection */
    { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},

    { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
    { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},

    /* required last entry */
    { 0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *  Indexed by the last field of em_vendor_info_t (String Index);
 *  every table entry above uses index 0.
 *********************************************************************/

static char *em_strings[] = {
    "Intel(R) PRO/1000 Network Connection"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int  em_probe(device_t);
static int  em_attach(device_t);
static int  em_detach(device_t);
static int  em_shutdown(device_t);
static void em_intr(void *);
static void em_start(struct ifnet *);
static int  em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct ifnet *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
static void em_media_status(struct ifnet *, struct ifmediareq *);
static int  em_media_change(struct ifnet *);
static void em_identify_hardware(struct adapter *);
static int  em_allocate_pci_resources(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int  em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
static int  em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int  em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
static void em_enable_intr(struct adapter *);
static void em_disable_intr(struct adapter *);
static void em_free_transmit_structures(struct adapter *);
static void em_free_receive_structures(struct adapter *);
static void em_update_stats_counters(struct adapter *);
static void em_clean_transmit_interrupts(struct adapter *);
static int  em_allocate_receive_structures(struct adapter *);
static int  em_allocate_transmit_structures(struct adapter *);
static void em_process_receive_interrupts(struct adapter *, int);
static void em_receive_checksum(struct adapter *,
                                struct em_rx_desc *,
                                struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *,
                                       struct mbuf *,
                                       u_int32_t *,
                                       u_int32_t *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_print_link_status(struct adapter *);
static int  em_get_buf(int i, struct adapter *,
                       struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int  em_encap(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
static int  em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int  em_82547_tx_fifo_reset(struct adapter *);
static void em_82547_move_tail(void *arg);
static void em_82547_move_tail_locked(struct adapter *);
static int  em_dma_malloc(struct adapter *, bus_size_t,
                          struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int  em_is_valid_ether_addr(u_int8_t *);
static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static u_int32_t em_fill_descriptors (u_int64_t address,
                                      u_int32_t length,
                                      PDESC_ARRAY desc_array);
static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
                                    const char *, struct em_int_delay_info *,
                                    int, int);
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t em_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, em_probe),
    DEVMETHOD(device_attach, em_attach),
    DEVMETHOD(device_detach, em_detach),
    DEVMETHOD(device_shutdown, em_shutdown),
    {0, 0}
};

static driver_t em_driver = {
    "em", em_methods, sizeof(struct adapter),
};

static devclass_t em_devclass;
DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
MODULE_DEPEND(em, pci, 1, 1, 1);
MODULE_DEPEND(em, ether, 1, 1, 1);

/*********************************************************************
 *  Tunable default values.
 *
 *  The hardware counts interrupt-delay registers in 1.024 usec ticks;
 *  these macros convert between ticks and plain microseconds, rounding
 *  to nearest (hence the +500 / +512 bias terms).
 *********************************************************************/

#define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)

static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);

/* Loader tunables overriding the compiled-in defaults above. */
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);

/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
252 * 253 * return BUS_PROBE_DEFAULT on success, positive on failure 254 *********************************************************************/ 255 256static int 257em_probe(device_t dev) 258{ 259 em_vendor_info_t *ent; 260 261 u_int16_t pci_vendor_id = 0; 262 u_int16_t pci_device_id = 0; 263 u_int16_t pci_subvendor_id = 0; 264 u_int16_t pci_subdevice_id = 0; 265 char adapter_name[60]; 266 267 INIT_DEBUGOUT("em_probe: begin"); 268 269 pci_vendor_id = pci_get_vendor(dev); 270 if (pci_vendor_id != EM_VENDOR_ID) 271 return(ENXIO); 272 273 pci_device_id = pci_get_device(dev); 274 pci_subvendor_id = pci_get_subvendor(dev); 275 pci_subdevice_id = pci_get_subdevice(dev); 276 277 ent = em_vendor_info_array; 278 while (ent->vendor_id != 0) { 279 if ((pci_vendor_id == ent->vendor_id) && 280 (pci_device_id == ent->device_id) && 281 282 ((pci_subvendor_id == ent->subvendor_id) || 283 (ent->subvendor_id == PCI_ANY_ID)) && 284 285 ((pci_subdevice_id == ent->subdevice_id) || 286 (ent->subdevice_id == PCI_ANY_ID))) { 287 sprintf(adapter_name, "%s, Version - %s", 288 em_strings[ent->index], 289 em_driver_version); 290 device_set_desc_copy(dev, adapter_name); 291 return(BUS_PROBE_DEFAULT); 292 } 293 ent++; 294 } 295 296 return(ENXIO); 297} 298 299/********************************************************************* 300 * Device initialization routine 301 * 302 * The attach entry point is called when the driver is being loaded. 303 * This routine identifies the type of hardware, allocates all resources 304 * and initializes the hardware. 
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_attach(device_t dev)
{
    struct adapter * adapter;
    int             tsize, rsize;
    int             error = 0;

    INIT_DEBUGOUT("em_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    if (!(adapter = device_get_softc(dev))) {
        printf("em: adapter structure allocation failed\n");
        return(ENOMEM);
    }
    bzero(adapter, sizeof(struct adapter));
    adapter->dev = dev;
    adapter->osdep.dev = dev;
    adapter->unit = device_get_unit(dev);
    EM_LOCK_INIT(adapter, device_get_nameunit(dev));

    /* Prepend this softc onto the global em_adapter_list. */
    if (em_adapter_list != NULL)
        em_adapter_list->prev = adapter;
    adapter->next = em_adapter_list;
    em_adapter_list = adapter;

    /* SYSCTL stuff: per-device debug_info and stats trigger nodes */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
            (void *)adapter, 0,
            em_sysctl_debug_info, "I", "Debug Information");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
            (void *)adapter, 0,
            em_sysctl_stats, "I", "Statistics");

    callout_init(&adapter->timer, CALLOUT_MPSAFE);
    callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);

    /* Determine hardware revision */
    em_identify_hardware(adapter);

    /*
     * Set up some sysctls for the tunable interrupt delays.
     * Must run after em_identify_hardware() since the abs-delay
     * registers (RADV/TADV) only exist on 82540 and later.
     */
    em_add_int_delay_sysctl(adapter, "rx_int_delay",
        "receive interrupt delay in usecs", &adapter->rx_int_delay,
        E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
    em_add_int_delay_sysctl(adapter, "tx_int_delay",
        "transmit interrupt delay in usecs", &adapter->tx_int_delay,
        E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
    if (adapter->hw.mac_type >= em_82540) {
        em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
            "receive interrupt delay limit in usecs",
            &adapter->rx_abs_int_delay,
            E1000_REG_OFFSET(&adapter->hw, RADV),
            em_rx_abs_int_delay_dflt);
        em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
            "transmit interrupt delay limit in usecs",
            &adapter->tx_abs_int_delay,
            E1000_REG_OFFSET(&adapter->hw, TADV),
            em_tx_abs_int_delay_dflt);
    }

    /* Parameters (to be read from user) */
    adapter->num_tx_desc = EM_MAX_TXD;
    adapter->num_rx_desc = EM_MAX_RXD;
    adapter->hw.autoneg = DO_AUTO_NEG;
    adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
    adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
    adapter->hw.tbi_compatibility_en = TRUE;
    adapter->rx_buffer_len = EM_RXBUFFER_2048;

    /*
     * These parameters control the automatic generation(Tx) and
     * response(Rx) to Ethernet PAUSE frames.
     */
    adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
    adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
    adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
    adapter->hw.fc_send_xon   = TRUE;
    adapter->hw.fc = em_fc_full;

    adapter->hw.phy_init_script = 1;
    adapter->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
    adapter->hw.master_slave = em_ms_hw_default;
#else
    adapter->hw.master_slave = EM_MASTER_SLAVE;
#endif
    /*
     * Set the max frame size assuming standard ethernet
     * sized frames
     */
    adapter->hw.max_frame_size =
        ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

    adapter->hw.min_frame_size =
        MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

    /*
     * This controls when hardware reports transmit completion
     * status.
     */
    adapter->hw.report_tx_early = 1;


    if (em_allocate_pci_resources(adapter)) {
        printf("em%d: Allocation of PCI resources failed\n",
               adapter->unit);
        error = ENXIO;
        goto err_pci;
    }


    /* Initialize eeprom parameters */
    em_init_eeprom_params(&adapter->hw);

    /* Descriptor rings must be a multiple of a 4K page. */
    tsize = EM_ROUNDUP(adapter->num_tx_desc *
               sizeof(struct em_tx_desc), 4096);

    /* Allocate Transmit Descriptor ring */
    if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
        printf("em%d: Unable to allocate tx_desc memory\n",
               adapter->unit);
        error = ENOMEM;
        goto err_tx_desc;
    }
    adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;

    rsize = EM_ROUNDUP(adapter->num_rx_desc *
               sizeof(struct em_rx_desc), 4096);

    /* Allocate Receive Descriptor ring */
    if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
        printf("em%d: Unable to allocate rx_desc memory\n",
               adapter->unit);
        error = ENOMEM;
        goto err_rx_desc;
    }
    adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;

    /* Initialize the hardware */
    if (em_hardware_init(adapter)) {
        printf("em%d: Unable to initialize the hardware\n",
               adapter->unit);
        error = EIO;
        goto err_hw_init;
    }

    /* Copy the permanent MAC address out of the EEPROM */
    if (em_read_mac_addr(&adapter->hw) < 0) {
        printf("em%d: EEPROM read error while reading mac address\n",
               adapter->unit);
        error = EIO;
        goto err_mac_addr;
    }

    if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
        printf("em%d: Invalid mac address\n", adapter->unit);
        error = EIO;
        goto err_mac_addr;
    }

    /* Setup OS specific network interface */
    em_setup_interface(dev, adapter);

    /* Initialize statistics */
    em_clear_hw_cntrs(&adapter->hw);
    em_update_stats_counters(adapter);
    adapter->hw.get_link_status = 1;
    em_check_for_link(&adapter->hw);

    /* Print the link status */
    if (adapter->link_active == 1) {
        em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
                    &adapter->link_duplex);
        printf("em%d: Speed:%d Mbps Duplex:%s\n",
               adapter->unit,
               adapter->link_speed,
               adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
    } else
        printf("em%d: Speed:N/A Duplex:N/A\n", adapter->unit);

    /* Identify 82544 on PCIX */
    em_get_bus_info(&adapter->hw);
    if(adapter->hw.bus_type == em_bus_type_pcix &&
       adapter->hw.mac_type == em_82544) {
        adapter->pcix_82544 = TRUE;
    }
    else {
        adapter->pcix_82544 = FALSE;
    }
    INIT_DEBUGOUT("em_attach: end");
    return(0);

    /*
     * Unwind in reverse order of acquisition; fall-through between the
     * labels is intentional (goto-based cleanup).
     */
err_mac_addr:
err_hw_init:
    em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
    em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
    em_free_pci_resources(adapter);
    return(error);

}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
em_detach(device_t dev)
{
    struct adapter * adapter = device_get_softc(dev);
    struct ifnet   *ifp = adapter->ifp;

    INIT_DEBUGOUT("em_detach: begin");

#ifdef DEVICE_POLLING
    /* Polling must be torn down before the interface disappears. */
    if (ifp->if_capenable & IFCAP_POLLING)
        ether_poll_deregister(ifp);
#endif

    EM_LOCK(adapter);
    adapter->in_detach = 1;     /* makes em_ioctl() bail out early */
    em_stop(adapter);
    em_phy_hw_reset(&adapter->hw);
    EM_UNLOCK(adapter);
    ether_ifdetach(adapter->ifp);

    em_free_pci_resources(adapter);
    bus_generic_detach(dev);
    if_free(ifp);

    /* Free Transmit Descriptor ring */
    if (adapter->tx_desc_base) {
        em_dma_free(adapter, &adapter->txdma);
        adapter->tx_desc_base = NULL;
    }

    /* Free Receive Descriptor ring */
    if (adapter->rx_desc_base) {
        em_dma_free(adapter, &adapter->rxdma);
        adapter->rx_desc_base = NULL;
    }

    /* Remove from the adapter list (doubly-linked, head special-cased) */
    if (em_adapter_list == adapter)
        em_adapter_list = adapter->next;
    if (adapter->next != NULL)
        adapter->next->prev = adapter->prev;
    if (adapter->prev != NULL)
        adapter->prev->next = adapter->next;

    EM_LOCK_DESTROY(adapter);

    return(0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
em_shutdown(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    /* Quiesce the hardware; resources are released in detach, not here. */
    EM_LOCK(adapter);
    em_stop(adapter);
    EM_UNLOCK(adapter);
    return(0);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
598 * The driver will remain in this routine as long as there are 599 * packets to transmit and transmit resources are available. 600 * In case resources are not available stack is notified and 601 * the packet is requeued. 602 **********************************************************************/ 603 604static void 605em_start_locked(struct ifnet *ifp) 606{ 607 struct mbuf *m_head; 608 struct adapter *adapter = ifp->if_softc; 609 610 mtx_assert(&adapter->mtx, MA_OWNED); 611 612 if (!adapter->link_active) 613 return; 614 615 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 616 617 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 618 619 if (m_head == NULL) break; 620 621 /* 622 * em_encap() can modify our pointer, and or make it NULL on 623 * failure. In that event, we can't requeue. 624 */ 625 if (em_encap(adapter, &m_head)) { 626 if (m_head == NULL) 627 break; 628 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 629 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 630 break; 631 } 632 633 /* Send a copy of the frame to the BPF listener */ 634 BPF_MTAP(ifp, m_head); 635 636 /* Set timeout in case hardware has problems transmitting */ 637 ifp->if_timer = EM_TX_TIMEOUT; 638 639 } 640 return; 641} 642 643static void 644em_start(struct ifnet *ifp) 645{ 646 struct adapter *adapter = ifp->if_softc; 647 648 EM_LOCK(adapter); 649 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 650 em_start_locked(ifp); 651 EM_UNLOCK(adapter); 652 return; 653} 654 655/********************************************************************* 656 * Ioctl entry point 657 * 658 * em_ioctl is called when the user wants to configure the 659 * interface. 
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    int             mask, reinit, error = 0;
    struct ifreq   *ifr = (struct ifreq *) data;
    struct adapter * adapter = ifp->if_softc;

    /* Refuse everything once detach has started (see em_detach). */
    if (adapter->in_detach) return(error);

    switch (command) {
    case SIOCSIFADDR:
    case SIOCGIFADDR:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
        ether_ioctl(ifp, command, data);
        break;
    case SIOCSIFMTU:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
        if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN || \
            /* 82573 does not support jumbo frames */
            (adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) {
            error = EINVAL;
        } else {
            EM_LOCK(adapter);
            ifp->if_mtu = ifr->ifr_mtu;
            adapter->hw.max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            /* Re-init required: buffer sizing depends on the MTU. */
            em_init_locked(adapter);
            EM_UNLOCK(adapter);
        }
        break;
    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
        EM_LOCK(adapter);
        if (ifp->if_flags & IFF_UP) {
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                em_init_locked(adapter);
            }

            /* Re-sync promiscuous state with the new flags. */
            em_disable_promisc(adapter);
            em_set_promisc(adapter);
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                em_stop(adapter);
            }
        }
        EM_UNLOCK(adapter);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            EM_LOCK(adapter);
            em_disable_intr(adapter);
            em_set_multi(adapter);
            if (adapter->hw.mac_type == em_82542_rev2_0) {
                em_initialize_receive_unit(adapter);
            }
#ifdef DEVICE_POLLING
            /* Leave interrupts masked while polling is active. */
            if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
                em_enable_intr(adapter);
            EM_UNLOCK(adapter);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;
    case SIOCSIFCAP:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
        reinit = 0;
        /* mask holds only the capability bits being toggled. */
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
        if (mask & IFCAP_POLLING) {
            if (ifr->ifr_reqcap & IFCAP_POLLING) {
                error = ether_poll_register(em_poll, ifp);
                if (error)
                    return(error);
                EM_LOCK(adapter);
                em_disable_intr(adapter);
                ifp->if_capenable |= IFCAP_POLLING;
                EM_UNLOCK(adapter);
            } else {
                error = ether_poll_deregister(ifp);
                /* Enable interrupt even in error case */
                EM_LOCK(adapter);
                em_enable_intr(adapter);
                ifp->if_capenable &= ~IFCAP_POLLING;
                EM_UNLOCK(adapter);
            }
        }
#endif
        if (mask & IFCAP_HWCSUM) {
            ifp->if_capenable ^= IFCAP_HWCSUM;
            reinit = 1;
        }
        if (mask & IFCAP_VLAN_HWTAGGING) {
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            reinit = 1;
        }
        if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
            em_init(adapter);
        break;
    default:
        IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
        error = EINVAL;
    }

    return(error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
em_watchdog(struct ifnet *ifp)
{
    struct adapter * adapter;
    adapter = ifp->if_softc;

    /* If we are in this routine because of pause frames, then
     * don't reset the hardware.
     */
    if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
        ifp->if_timer = EM_TX_TIMEOUT;
        return;
    }

    if (em_check_for_link(&adapter->hw))
        printf("em%d: watchdog timeout -- resetting\n", adapter->unit);

    /* Mark down, then fully reinitialize the adapter. */
    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

    em_init(adapter);

    ifp->if_oerrors++;
    return;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
em_init_locked(struct adapter * adapter)
{
    struct ifnet   *ifp;

    uint32_t        pba;
    ifp = adapter->ifp;

    INIT_DEBUGOUT("em_init: begin");

    mtx_assert(&adapter->mtx, MA_OWNED);

    em_stop(adapter);

    /* Packet Buffer Allocation (PBA)
     * Writing PBA sets the receive portion of the buffer
     * the remainder is used for the transmit buffer.
     *
     * Devices before the 82547 had a Packet Buffer of 64K.
     *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
     * After the 82547 the buffer was reduced to 40K.
     *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
     *   Note: default does not leave enough room for Jumbo Frame >10k.
     */
    if(adapter->hw.mac_type < em_82547) {
        /* Total FIFO is 64K */
        if(adapter->rx_buffer_len > EM_RXBUFFER_8192)
            pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
        else
            pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
    } else {
        /* Total FIFO is 40K */
        if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) {
            pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
        } else {
            pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
        }
        /* 82547+ track the Tx FIFO in software for the hang workaround. */
        adapter->tx_fifo_head = 0;
        adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
        adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
    }
    INIT_DEBUGOUT1("em_init: pba=%dK",pba);
    E1000_WRITE_REG(&adapter->hw, PBA, pba);

    /* Get the latest mac address, User can use a LAA */
    bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.mac_addr,
          ETHER_ADDR_LEN);

    /* Initialize the hardware */
    if (em_hardware_init(adapter)) {
        printf("em%d: Unable to initialize the hardware\n",
               adapter->unit);
        return;
    }

    if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
        em_enable_vlans(adapter);

    /* Prepare transmit descriptors and buffers */
    if (em_setup_transmit_structures(adapter)) {
        printf("em%d: Could not setup transmit structures\n",
               adapter->unit);
        em_stop(adapter);
        return;
    }
    em_initialize_transmit_unit(adapter);

    /* Setup Multicast table */
    em_set_multi(adapter);

    /* Prepare receive descriptors and buffers */
    if (em_setup_receive_structures(adapter)) {
        printf("em%d: Could not setup receive structures\n",
               adapter->unit);
        em_stop(adapter);
        return;
    }
    em_initialize_receive_unit(adapter);

    /* Don't lose promiscuous settings */
    em_set_promisc(adapter);

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    /* Hardware checksum offload exists on 82543 and newer only. */
    if (adapter->hw.mac_type >= em_82543) {
        if (ifp->if_capenable & IFCAP_TXCSUM)
            ifp->if_hwassist = EM_CHECKSUM_FEATURES;
        else
            ifp->if_hwassist = 0;
    }

    callout_reset(&adapter->timer, hz, em_local_timer, adapter);
    em_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
    /*
     * Only enable interrupts if we are not polling, make sure
     * they are off otherwise.
     */
    if (ifp->if_capenable & IFCAP_POLLING)
        em_disable_intr(adapter);
    else
#endif /* DEVICE_POLLING */
        em_enable_intr(adapter);

    /* Don't reset the phy next time init gets called */
    adapter->hw.phy_reset_disable = TRUE;

    return;
}

static void
em_init(void *arg)
{
    struct adapter * adapter = arg;

    /* Unlocked wrapper around em_init_locked() for callers like the stack. */
    EM_LOCK(adapter);
    em_init_locked(adapter);
    EM_UNLOCK(adapter);
    return;
}


#ifdef DEVICE_POLLING
static void
em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
    struct adapter *adapter = ifp->if_softc;
    u_int32_t reg_icr;

    mtx_assert(&adapter->mtx, MA_OWNED);

    /* On a status poll, also look for link-state changes. */
    if (cmd == POLL_AND_CHECK_STATUS) {
        reg_icr = E1000_READ_REG(&adapter->hw, ICR);
        if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
            callout_stop(&adapter->timer);
            adapter->hw.get_link_status = 1;
            em_check_for_link(&adapter->hw);
            em_print_link_status(adapter);
            callout_reset(&adapter->timer, hz, em_local_timer, adapter);
        }
    }
    /* count bounds how many rx packets one poll pass may consume. */
    em_process_receive_interrupts(adapter, count);
    em_clean_transmit_interrupts(adapter);

    /* Tx descriptors may have been freed; try to send queued packets. */
    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        em_start_locked(ifp);
}

static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
    struct adapter *adapter = ifp->if_softc;

    EM_LOCK(adapter);
    if (ifp->if_drv_flags & IFF_DRV_RUNNING)
        em_poll_locked(ifp, cmd, count);
    EM_UNLOCK(adapter);
}
#endif /* DEVICE_POLLING */

/*********************************************************************
 *
 *  Interrupt Service routine
 *
**********************************************************************/ 985static void 986em_intr(void *arg) 987{ 988 struct adapter *adapter = arg; 989 struct ifnet *ifp; 990 uint32_t reg_icr; 991 int wantinit = 0; 992 993 EM_LOCK(adapter); 994 995 ifp = adapter->ifp; 996 997#ifdef DEVICE_POLLING 998 if (ifp->if_capenable & IFCAP_POLLING) { 999 EM_UNLOCK(adapter); 1000 return; 1001 } 1002#endif /* DEVICE_POLLING */ 1003 1004 for (;;) { 1005 reg_icr = E1000_READ_REG(&adapter->hw, ICR); 1006 if (reg_icr == 0) 1007 break; 1008 1009 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1010 em_process_receive_interrupts(adapter, -1); 1011 em_clean_transmit_interrupts(adapter); 1012 } 1013 1014 /* Link status change */ 1015 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1016 callout_stop(&adapter->timer); 1017 adapter->hw.get_link_status = 1; 1018 em_check_for_link(&adapter->hw); 1019 em_print_link_status(adapter); 1020 callout_reset(&adapter->timer, hz, em_local_timer, 1021 adapter); 1022 } 1023 1024 if (reg_icr & E1000_ICR_RXO) { 1025 log(LOG_WARNING, "%s: RX overrun\n", ifp->if_xname); 1026 wantinit = 1; 1027 } 1028 } 1029#if 0 1030 if (wantinit) 1031 em_init_locked(adapter); 1032#endif 1033 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1034 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1035 em_start_locked(ifp); 1036 1037 EM_UNLOCK(adapter); 1038 return; 1039} 1040 1041 1042 1043/********************************************************************* 1044 * 1045 * Media Ioctl callback 1046 * 1047 * This routine is called whenever the user queries the status of 1048 * the interface using ifconfig. 
1049 * 1050 **********************************************************************/ 1051static void 1052em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1053{ 1054 struct adapter * adapter = ifp->if_softc; 1055 1056 INIT_DEBUGOUT("em_media_status: begin"); 1057 1058 em_check_for_link(&adapter->hw); 1059 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) { 1060 if (adapter->link_active == 0) { 1061 em_get_speed_and_duplex(&adapter->hw, 1062 &adapter->link_speed, 1063 &adapter->link_duplex); 1064 adapter->link_active = 1; 1065 } 1066 } else { 1067 if (adapter->link_active == 1) { 1068 adapter->link_speed = 0; 1069 adapter->link_duplex = 0; 1070 adapter->link_active = 0; 1071 } 1072 } 1073 1074 ifmr->ifm_status = IFM_AVALID; 1075 ifmr->ifm_active = IFM_ETHER; 1076 1077 if (!adapter->link_active) 1078 return; 1079 1080 ifmr->ifm_status |= IFM_ACTIVE; 1081 1082 if (adapter->hw.media_type == em_media_type_fiber) { 1083 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 1084 } else { 1085 switch (adapter->link_speed) { 1086 case 10: 1087 ifmr->ifm_active |= IFM_10_T; 1088 break; 1089 case 100: 1090 ifmr->ifm_active |= IFM_100_TX; 1091 break; 1092 case 1000: 1093 ifmr->ifm_active |= IFM_1000_T; 1094 break; 1095 } 1096 if (adapter->link_duplex == FULL_DUPLEX) 1097 ifmr->ifm_active |= IFM_FDX; 1098 else 1099 ifmr->ifm_active |= IFM_HDX; 1100 } 1101 return; 1102} 1103 1104/********************************************************************* 1105 * 1106 * Media Ioctl callback 1107 * 1108 * This routine is called when the user changes speed/duplex using 1109 * media/mediopt option with ifconfig. 
1110 * 1111 **********************************************************************/ 1112static int 1113em_media_change(struct ifnet *ifp) 1114{ 1115 struct adapter * adapter = ifp->if_softc; 1116 struct ifmedia *ifm = &adapter->media; 1117 1118 INIT_DEBUGOUT("em_media_change: begin"); 1119 1120 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1121 return(EINVAL); 1122 1123 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1124 case IFM_AUTO: 1125 adapter->hw.autoneg = DO_AUTO_NEG; 1126 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1127 break; 1128 case IFM_1000_SX: 1129 case IFM_1000_T: 1130 adapter->hw.autoneg = DO_AUTO_NEG; 1131 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; 1132 break; 1133 case IFM_100_TX: 1134 adapter->hw.autoneg = FALSE; 1135 adapter->hw.autoneg_advertised = 0; 1136 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1137 adapter->hw.forced_speed_duplex = em_100_full; 1138 else 1139 adapter->hw.forced_speed_duplex = em_100_half; 1140 break; 1141 case IFM_10_T: 1142 adapter->hw.autoneg = FALSE; 1143 adapter->hw.autoneg_advertised = 0; 1144 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1145 adapter->hw.forced_speed_duplex = em_10_full; 1146 else 1147 adapter->hw.forced_speed_duplex = em_10_half; 1148 break; 1149 default: 1150 printf("em%d: Unsupported media type\n", adapter->unit); 1151 } 1152 1153 /* As the speed/duplex settings my have changed we need to 1154 * reset the PHY. 1155 */ 1156 adapter->hw.phy_reset_disable = FALSE; 1157 1158 em_init(adapter); 1159 1160 return(0); 1161} 1162 1163/********************************************************************* 1164 * 1165 * This routine maps the mbufs to tx descriptors. 
1166 * 1167 * return 0 on success, positive on failure 1168 **********************************************************************/ 1169static int 1170em_encap(struct adapter *adapter, struct mbuf **m_headp) 1171{ 1172 u_int32_t txd_upper; 1173 u_int32_t txd_lower, txd_used = 0, txd_saved = 0; 1174 int i, j, error; 1175 u_int64_t address; 1176 1177 struct mbuf *m_head; 1178 1179 /* For 82544 Workaround */ 1180 DESC_ARRAY desc_array; 1181 u_int32_t array_elements; 1182 u_int32_t counter; 1183 struct m_tag *mtag; 1184 bus_dma_segment_t segs[EM_MAX_SCATTER]; 1185 bus_dmamap_t map; 1186 int nsegs; 1187 struct em_buffer *tx_buffer = NULL; 1188 struct em_tx_desc *current_tx_desc = NULL; 1189 struct ifnet *ifp = adapter->ifp; 1190 1191 m_head = *m_headp; 1192 1193 /* 1194 * Force a cleanup if number of TX descriptors 1195 * available hits the threshold 1196 */ 1197 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) { 1198 em_clean_transmit_interrupts(adapter); 1199 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) { 1200 adapter->no_tx_desc_avail1++; 1201 return(ENOBUFS); 1202 } 1203 } 1204 1205 /* 1206 * Map the packet for DMA. 
1207 */ 1208 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) { 1209 adapter->no_tx_map_avail++; 1210 return (ENOMEM); 1211 } 1212 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs, 1213 &nsegs, BUS_DMA_NOWAIT); 1214 if (error != 0) { 1215 adapter->no_tx_dma_setup++; 1216 bus_dmamap_destroy(adapter->txtag, map); 1217 return (error); 1218 } 1219 KASSERT(nsegs != 0, ("em_encap: empty packet")); 1220 1221 if (nsegs > adapter->num_tx_desc_avail) { 1222 adapter->no_tx_desc_avail2++; 1223 bus_dmamap_destroy(adapter->txtag, map); 1224 return (ENOBUFS); 1225 } 1226 1227 1228 if (ifp->if_hwassist > 0) { 1229 em_transmit_checksum_setup(adapter, m_head, 1230 &txd_upper, &txd_lower); 1231 } else 1232 txd_upper = txd_lower = 0; 1233 1234 1235 /* Find out if we are in vlan mode */ 1236 mtag = VLAN_OUTPUT_TAG(ifp, m_head); 1237 1238 /* 1239 * When operating in promiscuous mode, hardware encapsulation for 1240 * packets is disabled. This means we have to add the vlan 1241 * encapsulation in the driver, since it will have come down from the 1242 * VLAN layer with a tag instead of a VLAN header. 
1243 */ 1244 if (mtag != NULL && adapter->em_insert_vlan_header) { 1245 struct ether_vlan_header *evl; 1246 struct ether_header eh; 1247 1248 m_head = m_pullup(m_head, sizeof(eh)); 1249 if (m_head == NULL) { 1250 *m_headp = NULL; 1251 bus_dmamap_destroy(adapter->txtag, map); 1252 return (ENOBUFS); 1253 } 1254 eh = *mtod(m_head, struct ether_header *); 1255 M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT); 1256 if (m_head == NULL) { 1257 *m_headp = NULL; 1258 bus_dmamap_destroy(adapter->txtag, map); 1259 return (ENOBUFS); 1260 } 1261 m_head = m_pullup(m_head, sizeof(*evl)); 1262 if (m_head == NULL) { 1263 *m_headp = NULL; 1264 bus_dmamap_destroy(adapter->txtag, map); 1265 return (ENOBUFS); 1266 } 1267 evl = mtod(m_head, struct ether_vlan_header *); 1268 bcopy(&eh, evl, sizeof(*evl)); 1269 evl->evl_proto = evl->evl_encap_proto; 1270 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1271 evl->evl_tag = htons(VLAN_TAG_VALUE(mtag)); 1272 m_tag_delete(m_head, mtag); 1273 mtag = NULL; 1274 *m_headp = m_head; 1275 } 1276 1277 i = adapter->next_avail_tx_desc; 1278 if (adapter->pcix_82544) { 1279 txd_saved = i; 1280 txd_used = 0; 1281 } 1282 for (j = 0; j < nsegs; j++) { 1283 /* If adapter is 82544 and on PCIX bus */ 1284 if(adapter->pcix_82544) { 1285 array_elements = 0; 1286 address = htole64(segs[j].ds_addr); 1287 /* 1288 * Check the Address and Length combination and 1289 * split the data accordingly 1290 */ 1291 array_elements = em_fill_descriptors(address, 1292 htole32(segs[j].ds_len), 1293 &desc_array); 1294 for (counter = 0; counter < array_elements; counter++) { 1295 if (txd_used == adapter->num_tx_desc_avail) { 1296 adapter->next_avail_tx_desc = txd_saved; 1297 adapter->no_tx_desc_avail2++; 1298 bus_dmamap_destroy(adapter->txtag, map); 1299 return (ENOBUFS); 1300 } 1301 tx_buffer = &adapter->tx_buffer_area[i]; 1302 current_tx_desc = &adapter->tx_desc_base[i]; 1303 current_tx_desc->buffer_addr = htole64( 1304 desc_array.descriptor[counter].address); 1305 
current_tx_desc->lower.data = htole32( 1306 (adapter->txd_cmd | txd_lower | 1307 (u_int16_t)desc_array.descriptor[counter].length)); 1308 current_tx_desc->upper.data = htole32((txd_upper)); 1309 if (++i == adapter->num_tx_desc) 1310 i = 0; 1311 1312 tx_buffer->m_head = NULL; 1313 txd_used++; 1314 } 1315 } else { 1316 tx_buffer = &adapter->tx_buffer_area[i]; 1317 current_tx_desc = &adapter->tx_desc_base[i]; 1318 1319 current_tx_desc->buffer_addr = htole64(segs[j].ds_addr); 1320 current_tx_desc->lower.data = htole32( 1321 adapter->txd_cmd | txd_lower | segs[j].ds_len); 1322 current_tx_desc->upper.data = htole32(txd_upper); 1323 1324 if (++i == adapter->num_tx_desc) 1325 i = 0; 1326 1327 tx_buffer->m_head = NULL; 1328 } 1329 } 1330 1331 adapter->next_avail_tx_desc = i; 1332 if (adapter->pcix_82544) { 1333 adapter->num_tx_desc_avail -= txd_used; 1334 } 1335 else { 1336 adapter->num_tx_desc_avail -= nsegs; 1337 } 1338 1339 if (mtag != NULL) { 1340 /* Set the vlan id */ 1341 current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag)); 1342 1343 /* Tell hardware to add tag */ 1344 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE); 1345 } 1346 1347 tx_buffer->m_head = m_head; 1348 tx_buffer->map = map; 1349 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); 1350 1351 /* 1352 * Last Descriptor of Packet needs End Of Packet (EOP) 1353 */ 1354 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP); 1355 1356 /* 1357 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000 1358 * that this frame is available to transmit. 
1359 */ 1360 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 1361 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1362 if (adapter->hw.mac_type == em_82547 && 1363 adapter->link_duplex == HALF_DUPLEX) { 1364 em_82547_move_tail_locked(adapter); 1365 } else { 1366 E1000_WRITE_REG(&adapter->hw, TDT, i); 1367 if (adapter->hw.mac_type == em_82547) { 1368 em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len); 1369 } 1370 } 1371 1372 return(0); 1373} 1374 1375/********************************************************************* 1376 * 1377 * 82547 workaround to avoid controller hang in half-duplex environment. 1378 * The workaround is to avoid queuing a large packet that would span 1379 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers 1380 * in this case. We do that only when FIFO is quiescent. 1381 * 1382 **********************************************************************/ 1383static void 1384em_82547_move_tail_locked(struct adapter *adapter) 1385{ 1386 uint16_t hw_tdt; 1387 uint16_t sw_tdt; 1388 struct em_tx_desc *tx_desc; 1389 uint16_t length = 0; 1390 boolean_t eop = 0; 1391 1392 EM_LOCK_ASSERT(adapter); 1393 1394 hw_tdt = E1000_READ_REG(&adapter->hw, TDT); 1395 sw_tdt = adapter->next_avail_tx_desc; 1396 1397 while (hw_tdt != sw_tdt) { 1398 tx_desc = &adapter->tx_desc_base[hw_tdt]; 1399 length += tx_desc->lower.flags.length; 1400 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP; 1401 if(++hw_tdt == adapter->num_tx_desc) 1402 hw_tdt = 0; 1403 1404 if(eop) { 1405 if (em_82547_fifo_workaround(adapter, length)) { 1406 adapter->tx_fifo_wrk_cnt++; 1407 callout_reset(&adapter->tx_fifo_timer, 1, 1408 em_82547_move_tail, adapter); 1409 break; 1410 } 1411 E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt); 1412 em_82547_update_fifo_head(adapter, length); 1413 length = 0; 1414 } 1415 } 1416 return; 1417} 1418 1419static void 1420em_82547_move_tail(void *arg) 1421{ 1422 struct adapter *adapter = arg; 1423 1424 EM_LOCK(adapter); 1425 
em_82547_move_tail_locked(adapter); 1426 EM_UNLOCK(adapter); 1427} 1428 1429static int 1430em_82547_fifo_workaround(struct adapter *adapter, int len) 1431{ 1432 int fifo_space, fifo_pkt_len; 1433 1434 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR); 1435 1436 if (adapter->link_duplex == HALF_DUPLEX) { 1437 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 1438 1439 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) { 1440 if (em_82547_tx_fifo_reset(adapter)) { 1441 return(0); 1442 } 1443 else { 1444 return(1); 1445 } 1446 } 1447 } 1448 1449 return(0); 1450} 1451 1452static void 1453em_82547_update_fifo_head(struct adapter *adapter, int len) 1454{ 1455 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR); 1456 1457 /* tx_fifo_head is always 16 byte aligned */ 1458 adapter->tx_fifo_head += fifo_pkt_len; 1459 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) { 1460 adapter->tx_fifo_head -= adapter->tx_fifo_size; 1461 } 1462 1463 return; 1464} 1465 1466 1467static int 1468em_82547_tx_fifo_reset(struct adapter *adapter) 1469{ 1470 uint32_t tctl; 1471 1472 if ( (E1000_READ_REG(&adapter->hw, TDT) == 1473 E1000_READ_REG(&adapter->hw, TDH)) && 1474 (E1000_READ_REG(&adapter->hw, TDFT) == 1475 E1000_READ_REG(&adapter->hw, TDFH)) && 1476 (E1000_READ_REG(&adapter->hw, TDFTS) == 1477 E1000_READ_REG(&adapter->hw, TDFHS)) && 1478 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) { 1479 1480 /* Disable TX unit */ 1481 tctl = E1000_READ_REG(&adapter->hw, TCTL); 1482 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN); 1483 1484 /* Reset FIFO pointers */ 1485 E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr); 1486 E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr); 1487 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr); 1488 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr); 1489 1490 /* Re-enable TX unit */ 1491 E1000_WRITE_REG(&adapter->hw, TCTL, tctl); 1492 E1000_WRITE_FLUSH(&adapter->hw); 1493 1494 
adapter->tx_fifo_head = 0; 1495 adapter->tx_fifo_reset_cnt++; 1496 1497 return(TRUE); 1498 } 1499 else { 1500 return(FALSE); 1501 } 1502} 1503 1504static void 1505em_set_promisc(struct adapter * adapter) 1506{ 1507 1508 u_int32_t reg_rctl; 1509 struct ifnet *ifp = adapter->ifp; 1510 1511 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); 1512 1513 if (ifp->if_flags & IFF_PROMISC) { 1514 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1515 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1516 /* Disable VLAN stripping in promiscous mode 1517 * This enables bridging of vlan tagged frames to occur 1518 * and also allows vlan tags to be seen in tcpdump 1519 */ 1520 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1521 em_disable_vlans(adapter); 1522 adapter->em_insert_vlan_header = 1; 1523 } else if (ifp->if_flags & IFF_ALLMULTI) { 1524 reg_rctl |= E1000_RCTL_MPE; 1525 reg_rctl &= ~E1000_RCTL_UPE; 1526 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1527 adapter->em_insert_vlan_header = 0; 1528 } else 1529 adapter->em_insert_vlan_header = 0; 1530 1531 return; 1532} 1533 1534static void 1535em_disable_promisc(struct adapter * adapter) 1536{ 1537 u_int32_t reg_rctl; 1538 struct ifnet *ifp = adapter->ifp; 1539 1540 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); 1541 1542 reg_rctl &= (~E1000_RCTL_UPE); 1543 reg_rctl &= (~E1000_RCTL_MPE); 1544 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1545 1546 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1547 em_enable_vlans(adapter); 1548 adapter->em_insert_vlan_header = 0; 1549 1550 return; 1551} 1552 1553 1554/********************************************************************* 1555 * Multicast Update 1556 * 1557 * This routine is called whenever multicast address list is updated. 
1558 * 1559 **********************************************************************/ 1560 1561static void 1562em_set_multi(struct adapter * adapter) 1563{ 1564 u_int32_t reg_rctl = 0; 1565 u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS]; 1566 struct ifmultiaddr *ifma; 1567 int mcnt = 0; 1568 struct ifnet *ifp = adapter->ifp; 1569 1570 IOCTL_DEBUGOUT("em_set_multi: begin"); 1571 1572 if (adapter->hw.mac_type == em_82542_rev2_0) { 1573 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); 1574 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) { 1575 em_pci_clear_mwi(&adapter->hw); 1576 } 1577 reg_rctl |= E1000_RCTL_RST; 1578 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1579 msec_delay(5); 1580 } 1581 1582 IF_ADDR_LOCK(ifp); 1583 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1584 if (ifma->ifma_addr->sa_family != AF_LINK) 1585 continue; 1586 1587 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; 1588 1589 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1590 &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS); 1591 mcnt++; 1592 } 1593 IF_ADDR_UNLOCK(ifp); 1594 1595 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 1596 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); 1597 reg_rctl |= E1000_RCTL_MPE; 1598 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1599 } else 1600 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1); 1601 1602 if (adapter->hw.mac_type == em_82542_rev2_0) { 1603 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); 1604 reg_rctl &= ~E1000_RCTL_RST; 1605 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1606 msec_delay(5); 1607 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) { 1608 em_pci_set_mwi(&adapter->hw); 1609 } 1610 } 1611 1612 return; 1613} 1614 1615 1616/********************************************************************* 1617 * Timer routine 1618 * 1619 * This routine checks for link status and updates statistics. 
1620 * 1621 **********************************************************************/ 1622 1623static void 1624em_local_timer(void *arg) 1625{ 1626 struct ifnet *ifp; 1627 struct adapter * adapter = arg; 1628 ifp = adapter->ifp; 1629 1630 EM_LOCK(adapter); 1631 1632 em_check_for_link(&adapter->hw); 1633 em_print_link_status(adapter); 1634 em_update_stats_counters(adapter); 1635 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) { 1636 em_print_hw_stats(adapter); 1637 } 1638 em_smartspeed(adapter); 1639 1640 callout_reset(&adapter->timer, hz, em_local_timer, adapter); 1641 1642 EM_UNLOCK(adapter); 1643 return; 1644} 1645 1646static void 1647em_print_link_status(struct adapter * adapter) 1648{ 1649 struct ifnet *ifp = adapter->ifp; 1650 1651 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) { 1652 if (adapter->link_active == 0) { 1653 em_get_speed_and_duplex(&adapter->hw, 1654 &adapter->link_speed, 1655 &adapter->link_duplex); 1656 if (bootverbose) 1657 printf("em%d: Link is up %d Mbps %s\n", 1658 adapter->unit, 1659 adapter->link_speed, 1660 ((adapter->link_duplex == FULL_DUPLEX) ? 1661 "Full Duplex" : "Half Duplex")); 1662 adapter->link_active = 1; 1663 adapter->smartspeed = 0; 1664 if_link_state_change(ifp, LINK_STATE_UP); 1665 } 1666 } else { 1667 if (adapter->link_active == 1) { 1668 adapter->link_speed = 0; 1669 adapter->link_duplex = 0; 1670 if (bootverbose) 1671 printf("em%d: Link is Down\n", adapter->unit); 1672 adapter->link_active = 0; 1673 if_link_state_change(ifp, LINK_STATE_DOWN); 1674 } 1675 } 1676 1677 return; 1678} 1679 1680/********************************************************************* 1681 * 1682 * This routine disables all traffic on the adapter by issuing a 1683 * global reset on the MAC and deallocates TX/RX buffers. 
1684 * 1685 **********************************************************************/ 1686 1687static void 1688em_stop(void *arg) 1689{ 1690 struct ifnet *ifp; 1691 struct adapter * adapter = arg; 1692 ifp = adapter->ifp; 1693 1694 mtx_assert(&adapter->mtx, MA_OWNED); 1695 1696 INIT_DEBUGOUT("em_stop: begin"); 1697 1698 em_disable_intr(adapter); 1699 em_reset_hw(&adapter->hw); 1700 callout_stop(&adapter->timer); 1701 callout_stop(&adapter->tx_fifo_timer); 1702 em_free_transmit_structures(adapter); 1703 em_free_receive_structures(adapter); 1704 1705 1706 /* Tell the stack that the interface is no longer active */ 1707 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1708 1709 return; 1710} 1711 1712 1713/********************************************************************* 1714 * 1715 * Determine hardware revision. 1716 * 1717 **********************************************************************/ 1718static void 1719em_identify_hardware(struct adapter * adapter) 1720{ 1721 device_t dev = adapter->dev; 1722 1723 /* Make sure our PCI config space has the necessary stuff set */ 1724 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 1725 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && 1726 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) { 1727 printf("em%d: Memory Access and/or Bus Master bits were not set!\n", 1728 adapter->unit); 1729 adapter->hw.pci_cmd_word |= 1730 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); 1731 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2); 1732 } 1733 1734 /* Save off the information about this board */ 1735 adapter->hw.vendor_id = pci_get_vendor(dev); 1736 adapter->hw.device_id = pci_get_device(dev); 1737 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 1738 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); 1739 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); 1740 1741 /* Identify the MAC */ 1742 if (em_set_mac_type(&adapter->hw)) 1743 
printf("em%d: Unknown MAC Type\n", adapter->unit); 1744 1745 if(adapter->hw.mac_type == em_82541 || 1746 adapter->hw.mac_type == em_82541_rev_2 || 1747 adapter->hw.mac_type == em_82547 || 1748 adapter->hw.mac_type == em_82547_rev_2) 1749 adapter->hw.phy_init_script = TRUE; 1750 1751 return; 1752} 1753 1754static int 1755em_allocate_pci_resources(struct adapter * adapter) 1756{ 1757 int i, val, rid; 1758 device_t dev = adapter->dev; 1759 1760 rid = EM_MMBA; 1761 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1762 &rid, RF_ACTIVE); 1763 if (!(adapter->res_memory)) { 1764 printf("em%d: Unable to allocate bus resource: memory\n", 1765 adapter->unit); 1766 return(ENXIO); 1767 } 1768 adapter->osdep.mem_bus_space_tag = 1769 rman_get_bustag(adapter->res_memory); 1770 adapter->osdep.mem_bus_space_handle = 1771 rman_get_bushandle(adapter->res_memory); 1772 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle; 1773 1774 1775 if (adapter->hw.mac_type > em_82543) { 1776 /* Figure our where our IO BAR is ? 
*/ 1777 rid = EM_MMBA; 1778 for (i = 0; i < 5; i++) { 1779 val = pci_read_config(dev, rid, 4); 1780 if (val & 0x00000001) { 1781 adapter->io_rid = rid; 1782 break; 1783 } 1784 rid += 4; 1785 } 1786 1787 adapter->res_ioport = bus_alloc_resource_any(dev, 1788 SYS_RES_IOPORT, 1789 &adapter->io_rid, 1790 RF_ACTIVE); 1791 if (!(adapter->res_ioport)) { 1792 printf("em%d: Unable to allocate bus resource: ioport\n", 1793 adapter->unit); 1794 return(ENXIO); 1795 } 1796 1797 adapter->hw.io_base = 1798 rman_get_start(adapter->res_ioport); 1799 } 1800 1801 rid = 0x0; 1802 adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1803 RF_SHAREABLE | 1804 RF_ACTIVE); 1805 if (!(adapter->res_interrupt)) { 1806 printf("em%d: Unable to allocate bus resource: interrupt\n", 1807 adapter->unit); 1808 return(ENXIO); 1809 } 1810 if (bus_setup_intr(dev, adapter->res_interrupt, 1811 INTR_TYPE_NET | INTR_MPSAFE, 1812 (void (*)(void *)) em_intr, adapter, 1813 &adapter->int_handler_tag)) { 1814 printf("em%d: Error registering interrupt handler!\n", 1815 adapter->unit); 1816 return(ENXIO); 1817 } 1818 1819 adapter->hw.back = &adapter->osdep; 1820 1821 return(0); 1822} 1823 1824static void 1825em_free_pci_resources(struct adapter * adapter) 1826{ 1827 device_t dev = adapter->dev; 1828 1829 if (adapter->res_interrupt != NULL) { 1830 bus_teardown_intr(dev, adapter->res_interrupt, 1831 adapter->int_handler_tag); 1832 bus_release_resource(dev, SYS_RES_IRQ, 0, 1833 adapter->res_interrupt); 1834 } 1835 if (adapter->res_memory != NULL) { 1836 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA, 1837 adapter->res_memory); 1838 } 1839 1840 if (adapter->res_ioport != NULL) { 1841 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 1842 adapter->res_ioport); 1843 } 1844 return; 1845} 1846 1847/********************************************************************* 1848 * 1849 * Initialize the hardware to a configuration as specified by the 1850 * adapter structure. 
The controller is reset, the EEPROM is 1851 * verified, the MAC address is set, then the shared initialization 1852 * routines are called. 1853 * 1854 **********************************************************************/ 1855static int 1856em_hardware_init(struct adapter * adapter) 1857{ 1858 INIT_DEBUGOUT("em_hardware_init: begin"); 1859 /* Issue a global reset */ 1860 em_reset_hw(&adapter->hw); 1861 1862 /* When hardware is reset, fifo_head is also reset */ 1863 adapter->tx_fifo_head = 0; 1864 1865 /* Make sure we have a good EEPROM before we read from it */ 1866 if (em_validate_eeprom_checksum(&adapter->hw) < 0) { 1867 printf("em%d: The EEPROM Checksum Is Not Valid\n", 1868 adapter->unit); 1869 return(EIO); 1870 } 1871 1872 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) { 1873 printf("em%d: EEPROM read error while reading part number\n", 1874 adapter->unit); 1875 return(EIO); 1876 } 1877 1878 if (em_init_hw(&adapter->hw) < 0) { 1879 printf("em%d: Hardware Initialization Failed", 1880 adapter->unit); 1881 return(EIO); 1882 } 1883 1884 em_check_for_link(&adapter->hw); 1885 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) 1886 adapter->link_active = 1; 1887 else 1888 adapter->link_active = 0; 1889 1890 if (adapter->link_active) { 1891 em_get_speed_and_duplex(&adapter->hw, 1892 &adapter->link_speed, 1893 &adapter->link_duplex); 1894 } else { 1895 adapter->link_speed = 0; 1896 adapter->link_duplex = 0; 1897 } 1898 1899 return(0); 1900} 1901 1902/********************************************************************* 1903 * 1904 * Setup networking device structure and register an interface. 
1905 * 1906 **********************************************************************/ 1907static void 1908em_setup_interface(device_t dev, struct adapter * adapter) 1909{ 1910 struct ifnet *ifp; 1911 INIT_DEBUGOUT("em_setup_interface: begin"); 1912 1913 ifp = adapter->ifp = if_alloc(IFT_ETHER); 1914 if (ifp == NULL) 1915 panic("%s: can not if_alloc()", device_get_nameunit(dev)); 1916 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1917 ifp->if_mtu = ETHERMTU; 1918 ifp->if_baudrate = 1000000000; 1919 ifp->if_init = em_init; 1920 ifp->if_softc = adapter; 1921 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1922 ifp->if_ioctl = em_ioctl; 1923 ifp->if_start = em_start; 1924 ifp->if_watchdog = em_watchdog; 1925 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); 1926 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; 1927 IFQ_SET_READY(&ifp->if_snd); 1928 1929 ether_ifattach(ifp, adapter->hw.mac_addr); 1930 1931 ifp->if_capabilities = ifp->if_capenable = 0; 1932 1933 if (adapter->hw.mac_type >= em_82543) { 1934 ifp->if_capabilities |= IFCAP_HWCSUM; 1935 ifp->if_capenable |= IFCAP_HWCSUM; 1936 } 1937 1938 /* 1939 * Tell the upper layer(s) we support long frames. 
1940 */ 1941 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1942 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1943 ifp->if_capenable |= IFCAP_VLAN_MTU; 1944 1945#ifdef DEVICE_POLLING 1946 ifp->if_capabilities |= IFCAP_POLLING; 1947#endif 1948 1949 /* 1950 * Specify the media types supported by this adapter and register 1951 * callbacks to update media and link information 1952 */ 1953 ifmedia_init(&adapter->media, IFM_IMASK, em_media_change, 1954 em_media_status); 1955 if (adapter->hw.media_type == em_media_type_fiber) { 1956 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 1957 0, NULL); 1958 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 1959 0, NULL); 1960 } else { 1961 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 1962 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 1963 0, NULL); 1964 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 1965 0, NULL); 1966 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 1967 0, NULL); 1968 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 1969 0, NULL); 1970 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); 1971 } 1972 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1973 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1974 1975 return; 1976} 1977 1978 1979/********************************************************************* 1980 * 1981 * Workaround for SmartSpeed on 82541 and 82547 controllers 1982 * 1983 **********************************************************************/ 1984static void 1985em_smartspeed(struct adapter *adapter) 1986{ 1987 uint16_t phy_tmp; 1988 1989 if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) || 1990 !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 1991 return; 1992 1993 if(adapter->smartspeed == 0) { 1994 /* If Master/Slave config fault is asserted twice, 1995 * we assume back-to-back */ 1996 
em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 1997 if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return; 1998 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 1999 if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2000 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, 2001 &phy_tmp); 2002 if(phy_tmp & CR_1000T_MS_ENABLE) { 2003 phy_tmp &= ~CR_1000T_MS_ENABLE; 2004 em_write_phy_reg(&adapter->hw, 2005 PHY_1000T_CTRL, phy_tmp); 2006 adapter->smartspeed++; 2007 if(adapter->hw.autoneg && 2008 !em_phy_setup_autoneg(&adapter->hw) && 2009 !em_read_phy_reg(&adapter->hw, PHY_CTRL, 2010 &phy_tmp)) { 2011 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2012 MII_CR_RESTART_AUTO_NEG); 2013 em_write_phy_reg(&adapter->hw, 2014 PHY_CTRL, phy_tmp); 2015 } 2016 } 2017 } 2018 return; 2019 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2020 /* If still no link, perhaps using 2/3 pair cable */ 2021 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2022 phy_tmp |= CR_1000T_MS_ENABLE; 2023 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2024 if(adapter->hw.autoneg && 2025 !em_phy_setup_autoneg(&adapter->hw) && 2026 !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) { 2027 phy_tmp |= (MII_CR_AUTO_NEG_EN | 2028 MII_CR_RESTART_AUTO_NEG); 2029 em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp); 2030 } 2031 } 2032 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2033 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2034 adapter->smartspeed = 0; 2035 2036 return; 2037} 2038 2039 2040/* 2041 * Manage DMA'able memory. 
2042 */ 2043static void 2044em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2045{ 2046 if (error) 2047 return; 2048 *(bus_addr_t*) arg = segs->ds_addr; 2049 return; 2050} 2051 2052static int 2053em_dma_malloc(struct adapter *adapter, bus_size_t size, 2054 struct em_dma_alloc *dma, int mapflags) 2055{ 2056 int r; 2057 2058 r = bus_dma_tag_create(NULL, /* parent */ 2059 PAGE_SIZE, 0, /* alignment, bounds */ 2060 BUS_SPACE_MAXADDR, /* lowaddr */ 2061 BUS_SPACE_MAXADDR, /* highaddr */ 2062 NULL, NULL, /* filter, filterarg */ 2063 size, /* maxsize */ 2064 1, /* nsegments */ 2065 size, /* maxsegsize */ 2066 BUS_DMA_ALLOCNOW, /* flags */ 2067 NULL, /* lockfunc */ 2068 NULL, /* lockarg */ 2069 &dma->dma_tag); 2070 if (r != 0) { 2071 printf("em%d: em_dma_malloc: bus_dma_tag_create failed; " 2072 "error %u\n", adapter->unit, r); 2073 goto fail_0; 2074 } 2075 2076 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 2077 BUS_DMA_NOWAIT, &dma->dma_map); 2078 if (r != 0) { 2079 printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; " 2080 "size %ju, error %d\n", adapter->unit, 2081 (uintmax_t)size, r); 2082 goto fail_2; 2083 } 2084 2085 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 2086 size, 2087 em_dmamap_cb, 2088 &dma->dma_paddr, 2089 mapflags | BUS_DMA_NOWAIT); 2090 if (r != 0) { 2091 printf("em%d: em_dma_malloc: bus_dmamap_load failed; " 2092 "error %u\n", adapter->unit, r); 2093 goto fail_3; 2094 } 2095 2096 dma->dma_size = size; 2097 return (0); 2098 2099fail_3: 2100 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2101fail_2: 2102 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2103 bus_dma_tag_destroy(dma->dma_tag); 2104fail_0: 2105 dma->dma_map = NULL; 2106 dma->dma_tag = NULL; 2107 return (r); 2108} 2109 2110static void 2111em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2112{ 2113 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2114 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 
2115 bus_dma_tag_destroy(dma->dma_tag); 2116} 2117 2118 2119/********************************************************************* 2120 * 2121 * Allocate memory for tx_buffer structures. The tx_buffer stores all 2122 * the information needed to transmit a packet on the wire. 2123 * 2124 **********************************************************************/ 2125static int 2126em_allocate_transmit_structures(struct adapter * adapter) 2127{ 2128 if (!(adapter->tx_buffer_area = 2129 (struct em_buffer *) malloc(sizeof(struct em_buffer) * 2130 adapter->num_tx_desc, M_DEVBUF, 2131 M_NOWAIT))) { 2132 printf("em%d: Unable to allocate tx_buffer memory\n", 2133 adapter->unit); 2134 return ENOMEM; 2135 } 2136 2137 bzero(adapter->tx_buffer_area, 2138 sizeof(struct em_buffer) * adapter->num_tx_desc); 2139 2140 return 0; 2141} 2142 2143/********************************************************************* 2144 * 2145 * Allocate and initialize transmit structures. 2146 * 2147 **********************************************************************/ 2148static int 2149em_setup_transmit_structures(struct adapter * adapter) 2150{ 2151 /* 2152 * Setup DMA descriptor areas. 
2153 */ 2154 if (bus_dma_tag_create(NULL, /* parent */ 2155 1, 0, /* alignment, bounds */ 2156 BUS_SPACE_MAXADDR, /* lowaddr */ 2157 BUS_SPACE_MAXADDR, /* highaddr */ 2158 NULL, NULL, /* filter, filterarg */ 2159 MCLBYTES * 8, /* maxsize */ 2160 EM_MAX_SCATTER, /* nsegments */ 2161 MCLBYTES * 8, /* maxsegsize */ 2162 BUS_DMA_ALLOCNOW, /* flags */ 2163 NULL, /* lockfunc */ 2164 NULL, /* lockarg */ 2165 &adapter->txtag)) { 2166 printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit); 2167 return (ENOMEM); 2168 } 2169 2170 if (em_allocate_transmit_structures(adapter)) 2171 return (ENOMEM); 2172 2173 bzero((void *) adapter->tx_desc_base, 2174 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc); 2175 2176 adapter->next_avail_tx_desc = 0; 2177 adapter->oldest_used_tx_desc = 0; 2178 2179 /* Set number of descriptors available */ 2180 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2181 2182 /* Set checksum context */ 2183 adapter->active_checksum_context = OFFLOAD_NONE; 2184 2185 return (0); 2186} 2187 2188/********************************************************************* 2189 * 2190 * Enable transmit unit. 
2191 * 2192 **********************************************************************/ 2193static void 2194em_initialize_transmit_unit(struct adapter * adapter) 2195{ 2196 u_int32_t reg_tctl; 2197 u_int32_t reg_tipg = 0; 2198 u_int64_t bus_addr; 2199 2200 INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); 2201 /* Setup the Base and Length of the Tx Descriptor Ring */ 2202 bus_addr = adapter->txdma.dma_paddr; 2203 E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr); 2204 E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32)); 2205 E1000_WRITE_REG(&adapter->hw, TDLEN, 2206 adapter->num_tx_desc * 2207 sizeof(struct em_tx_desc)); 2208 2209 /* Setup the HW Tx Head and Tail descriptor pointers */ 2210 E1000_WRITE_REG(&adapter->hw, TDH, 0); 2211 E1000_WRITE_REG(&adapter->hw, TDT, 0); 2212 2213 2214 HW_DEBUGOUT2("Base = %x, Length = %x\n", 2215 E1000_READ_REG(&adapter->hw, TDBAL), 2216 E1000_READ_REG(&adapter->hw, TDLEN)); 2217 2218 /* Set the default values for the Tx Inter Packet Gap timer */ 2219 switch (adapter->hw.mac_type) { 2220 case em_82542_rev2_0: 2221 case em_82542_rev2_1: 2222 reg_tipg = DEFAULT_82542_TIPG_IPGT; 2223 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2224 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2225 break; 2226 default: 2227 if (adapter->hw.media_type == em_media_type_fiber) 2228 reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2229 else 2230 reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2231 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2232 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2233 } 2234 2235 E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg); 2236 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value); 2237 if(adapter->hw.mac_type >= em_82540) 2238 E1000_WRITE_REG(&adapter->hw, TADV, 2239 adapter->tx_abs_int_delay.value); 2240 2241 /* Program the Transmit Control Register */ 2242 reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN | 2243 
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2244 if (adapter->hw.mac_type >= em_82573) 2245 reg_tctl |= E1000_TCTL_MULR; 2246 if (adapter->link_duplex == 1) { 2247 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT; 2248 } else { 2249 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT; 2250 } 2251 E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl); 2252 2253 /* Setup Transmit Descriptor Settings for this adapter */ 2254 adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS; 2255 2256 if (adapter->tx_int_delay.value > 0) 2257 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 2258 2259 return; 2260} 2261 2262/********************************************************************* 2263 * 2264 * Free all transmit related data structures. 2265 * 2266 **********************************************************************/ 2267static void 2268em_free_transmit_structures(struct adapter * adapter) 2269{ 2270 struct em_buffer *tx_buffer; 2271 int i; 2272 2273 INIT_DEBUGOUT("free_transmit_structures: begin"); 2274 2275 if (adapter->tx_buffer_area != NULL) { 2276 tx_buffer = adapter->tx_buffer_area; 2277 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { 2278 if (tx_buffer->m_head != NULL) { 2279 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2280 bus_dmamap_destroy(adapter->txtag, tx_buffer->map); 2281 m_freem(tx_buffer->m_head); 2282 } 2283 tx_buffer->m_head = NULL; 2284 } 2285 } 2286 if (adapter->tx_buffer_area != NULL) { 2287 free(adapter->tx_buffer_area, M_DEVBUF); 2288 adapter->tx_buffer_area = NULL; 2289 } 2290 if (adapter->txtag != NULL) { 2291 bus_dma_tag_destroy(adapter->txtag); 2292 adapter->txtag = NULL; 2293 } 2294 return; 2295} 2296 2297/********************************************************************* 2298 * 2299 * The offload context needs to be set when we transfer the first 2300 * packet of a particular protocol (TCP/UDP). We change the 2301 * context only if the protocol type changes. 
2302 * 2303 **********************************************************************/ 2304static void 2305em_transmit_checksum_setup(struct adapter * adapter, 2306 struct mbuf *mp, 2307 u_int32_t *txd_upper, 2308 u_int32_t *txd_lower) 2309{ 2310 struct em_context_desc *TXD; 2311 struct em_buffer *tx_buffer; 2312 int curr_txd; 2313 2314 if (mp->m_pkthdr.csum_flags) { 2315 2316 if (mp->m_pkthdr.csum_flags & CSUM_TCP) { 2317 *txd_upper = E1000_TXD_POPTS_TXSM << 8; 2318 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2319 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) 2320 return; 2321 else 2322 adapter->active_checksum_context = OFFLOAD_TCP_IP; 2323 2324 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { 2325 *txd_upper = E1000_TXD_POPTS_TXSM << 8; 2326 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2327 if (adapter->active_checksum_context == OFFLOAD_UDP_IP) 2328 return; 2329 else 2330 adapter->active_checksum_context = OFFLOAD_UDP_IP; 2331 } else { 2332 *txd_upper = 0; 2333 *txd_lower = 0; 2334 return; 2335 } 2336 } else { 2337 *txd_upper = 0; 2338 *txd_lower = 0; 2339 return; 2340 } 2341 2342 /* If we reach this point, the checksum offload context 2343 * needs to be reset. 
2344 */ 2345 curr_txd = adapter->next_avail_tx_desc; 2346 tx_buffer = &adapter->tx_buffer_area[curr_txd]; 2347 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd]; 2348 2349 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN; 2350 TXD->lower_setup.ip_fields.ipcso = 2351 ETHER_HDR_LEN + offsetof(struct ip, ip_sum); 2352 TXD->lower_setup.ip_fields.ipcse = 2353 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1); 2354 2355 TXD->upper_setup.tcp_fields.tucss = 2356 ETHER_HDR_LEN + sizeof(struct ip); 2357 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2358 2359 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { 2360 TXD->upper_setup.tcp_fields.tucso = 2361 ETHER_HDR_LEN + sizeof(struct ip) + 2362 offsetof(struct tcphdr, th_sum); 2363 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { 2364 TXD->upper_setup.tcp_fields.tucso = 2365 ETHER_HDR_LEN + sizeof(struct ip) + 2366 offsetof(struct udphdr, uh_sum); 2367 } 2368 2369 TXD->tcp_seg_setup.data = htole32(0); 2370 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT); 2371 2372 tx_buffer->m_head = NULL; 2373 2374 if (++curr_txd == adapter->num_tx_desc) 2375 curr_txd = 0; 2376 2377 adapter->num_tx_desc_avail--; 2378 adapter->next_avail_tx_desc = curr_txd; 2379 2380 return; 2381} 2382 2383/********************************************************************** 2384 * 2385 * Examine each tx_buffer in the used queue. If the hardware is done 2386 * processing the packet then free associated resources. The 2387 * tx_buffer is put back on the free queue. 
2388 * 2389 **********************************************************************/ 2390static void 2391em_clean_transmit_interrupts(struct adapter * adapter) 2392{ 2393 int i, num_avail; 2394 struct em_buffer *tx_buffer; 2395 struct em_tx_desc *tx_desc; 2396 struct ifnet *ifp = adapter->ifp; 2397 2398 mtx_assert(&adapter->mtx, MA_OWNED); 2399 2400 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2401 return; 2402 2403 num_avail = adapter->num_tx_desc_avail; 2404 i = adapter->oldest_used_tx_desc; 2405 2406 tx_buffer = &adapter->tx_buffer_area[i]; 2407 tx_desc = &adapter->tx_desc_base[i]; 2408 2409 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2410 BUS_DMASYNC_POSTREAD); 2411 while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2412 2413 tx_desc->upper.data = 0; 2414 num_avail++; 2415 2416 if (tx_buffer->m_head) { 2417 ifp->if_opackets++; 2418 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2419 bus_dmamap_destroy(adapter->txtag, tx_buffer->map); 2420 2421 m_freem(tx_buffer->m_head); 2422 tx_buffer->m_head = NULL; 2423 } 2424 2425 if (++i == adapter->num_tx_desc) 2426 i = 0; 2427 2428 tx_buffer = &adapter->tx_buffer_area[i]; 2429 tx_desc = &adapter->tx_desc_base[i]; 2430 } 2431 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, 2432 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2433 2434 adapter->oldest_used_tx_desc = i; 2435 2436 /* 2437 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack 2438 * that it is OK to send packets. 2439 * If there are no pending descriptors, clear the timeout. Otherwise, 2440 * if some descriptors have been freed, restart the timeout. 
2441 */ 2442 if (num_avail > EM_TX_CLEANUP_THRESHOLD) { 2443 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2444 if (num_avail == adapter->num_tx_desc) 2445 ifp->if_timer = 0; 2446 else if (num_avail == adapter->num_tx_desc_avail) 2447 ifp->if_timer = EM_TX_TIMEOUT; 2448 } 2449 adapter->num_tx_desc_avail = num_avail; 2450 return; 2451} 2452 2453/********************************************************************* 2454 * 2455 * Get a buffer from system mbuf buffer pool. 2456 * 2457 **********************************************************************/ 2458static int 2459em_get_buf(int i, struct adapter *adapter, 2460 struct mbuf *nmp) 2461{ 2462 register struct mbuf *mp = nmp; 2463 struct em_buffer *rx_buffer; 2464 struct ifnet *ifp; 2465 bus_addr_t paddr; 2466 int error; 2467 2468 ifp = adapter->ifp; 2469 2470 if (mp == NULL) { 2471 mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2472 if (mp == NULL) { 2473 adapter->mbuf_cluster_failed++; 2474 return(ENOBUFS); 2475 } 2476 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 2477 } else { 2478 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 2479 mp->m_data = mp->m_ext.ext_buf; 2480 mp->m_next = NULL; 2481 } 2482 2483 if (ifp->if_mtu <= ETHERMTU) { 2484 m_adj(mp, ETHER_ALIGN); 2485 } 2486 2487 rx_buffer = &adapter->rx_buffer_area[i]; 2488 2489 /* 2490 * Using memory from the mbuf cluster pool, invoke the 2491 * bus_dma machinery to arrange the memory mapping. 2492 */ 2493 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map, 2494 mtod(mp, void *), mp->m_len, 2495 em_dmamap_cb, &paddr, 0); 2496 if (error) { 2497 m_free(mp); 2498 return(error); 2499 } 2500 rx_buffer->m_head = mp; 2501 adapter->rx_desc_base[i].buffer_addr = htole64(paddr); 2502 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD | 2503 BUS_DMASYNC_PREWRITE); 2504 2505 return(0); 2506} 2507 2508/********************************************************************* 2509 * 2510 * Allocate memory for rx_buffer structures. 
Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
em_allocate_receive_structures(struct adapter * adapter)
{
	int             i, error;
	struct em_buffer *rx_buffer;

	if (!(adapter->rx_buffer_area =
	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
					  adapter->num_rx_desc, M_DEVBUF,
					  M_NOWAIT))) {
		printf("em%d: Unable to allocate rx_buffer memory\n",
		       adapter->unit);
		return(ENOMEM);
	}

	bzero(adapter->rx_buffer_area,
	      sizeof(struct em_buffer) * adapter->num_rx_desc);

	/* One cluster-sized, single-segment mapping per RX slot. */
	error = bus_dma_tag_create(NULL,		/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MCLBYTES,		/* maxsize */
				   1,			/* nsegments */
				   MCLBYTES,		/* maxsegsize */
				   BUS_DMA_ALLOCNOW,	/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockarg */
				   &adapter->rxtag);
	if (error != 0) {
		printf("em%d: em_allocate_receive_structures: "
		       "bus_dma_tag_create failed; error %u\n",
		       adapter->unit, error);
		goto fail_0;
	}

	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error != 0) {
			printf("em%d: em_allocate_receive_structures: "
			       "bus_dmamap_create failed; error %u\n",
			       adapter->unit, error);
			/*
			 * NOTE(review): fail_1 destroys the tag without
			 * destroying the maps already created in this
			 * loop -- verify whether bus_dma_tag_destroy()
			 * can fail/leak in that case.
			 */
			goto fail_1;
		}
	}

	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_get_buf(i, adapter, NULL);
		if (error != 0) {
			adapter->rx_buffer_area[i].m_head = NULL;
			adapter->rx_desc_base[i].buffer_addr = 0;
			/*
			 * NOTE(review): returns without unwinding the
			 * tag/maps/mbufs created above -- presumably the
			 * caller runs em_free_receive_structures() on
			 * failure; confirm against the callers.
			 */
			return(error);
		}
	}

	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return(0);

fail_1:
	bus_dma_tag_destroy(adapter->rxtag);
fail_0:
	adapter->rxtag = NULL;
	free(adapter->rx_buffer_area, M_DEVBUF);
	adapter->rx_buffer_area = NULL;
	return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
static int
em_setup_receive_structures(struct adapter * adapter)
{
	bzero((void *) adapter->rx_desc_base,
	      (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);

	if (em_allocate_receive_structures(adapter))
		return ENOMEM;

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;
	return(0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
static void
em_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	struct ifnet    *ifp;
	u_int64_t	bus_addr;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	ifp = adapter->ifp;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&adapter->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR,
			adapter->rx_int_delay.value | E1000_RDT_FPDB);

	if(adapter->hw.mac_type >= em_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV,
				adapter->rx_abs_int_delay.value);

		/* Set the interrupt throttling rate.  Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC        8000
#define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
		E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Accept bad packets when TBI compatibility is on, so the carrier
	 * extension workaround in the RX path can inspect them. */
	if (adapter->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	switch (adapter->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer   *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL) {
				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL)
				m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
static void
em_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet        *ifp;
	struct mbuf         *mp;
	u_int8_t            accept_frame = 0;
	u_int8_t            eop = 0;
	u_int16_t           len, desc_len, prev_len_adj;
	int                 i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	mtx_assert(&adapter->mtx, MA_OWNED);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		return;
	}

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
		    (count != 0) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Only whole frames count against the budget. */
			count--;
			eop = 1;
			/* Strip the CRC; if this final fragment is shorter
			 * than the CRC, the remainder must come off the
			 * previous mbuf in the chain (prev_len_adj). */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			}
			else {
				len = desc_len - ETHER_CRC_LEN;
			}
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u_int8_t            last_byte;
			u_int32_t           pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			/* TBI/carrier-extension workaround: some "errored"
			 * frames are actually acceptable on fiber links. */
			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&adapter->hw,
						    &adapter->stats,
						    pkt_len,
						    adapter->hw.mac_addr);
				if (len > 0) len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Try to replenish the slot with a fresh cluster;
			 * on failure, recycle the old one and drop. */
			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
				adapter->dropped_pkts++;
				em_get_buf(i, adapter, mp);
				if (adapter->fmp != NULL)
					m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	 /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if we
				 * received less than 4 bytes in the last descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -= prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				em_receive_checksum(adapter, current_desc,
						    adapter->fmp);
				/* The fourth VLAN_INPUT_TAG argument is the
				 * statement run when tagging fails. */
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    (current_desc->special &
					     E1000_RXD_SPC_VLAN_MASK),
					    adapter->fmp = NULL);

				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
			em_get_buf(i, adapter, mp);
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
		E1000_WRITE_REG(&adapter->hw, RDT, i);

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Hand the frame up without the lock; re-read our ring
		 * index afterwards since it may have been reset. */
		if (m != NULL) {
			adapter->next_rx_desc_to_check = i;
			EM_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_LOCK(adapter);
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;
	return;
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
em_receive_checksum(struct adapter *adapter,
		    struct em_rx_desc *rx_desc,
		    struct mbuf *mp)
{
	/* 82543 or newer only */
	if ((adapter->hw.mac_type < em_82543) ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}

	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			/* NOTE(review): htons(0xffff) is 0xffff on any
			 * byte order; later drivers just use 0xffff. */
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}

	return;
}


/* Enable hardware VLAN tag stripping/insertion (CTRL.VME). */
static void
em_enable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl |= E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

	return;
}

/* Disable hardware VLAN tag stripping/insertion (CTRL.VME). */
static void
em_disable_vlans(struct adapter *adapter)
{
	uint32_t ctrl;

	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

	return;
}

/* Unmask the driver's interrupt causes. */
static void
em_enable_intr(struct adapter * adapter)
{
	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
	return;
}

static void
em_disable_intr(struct adapter *adapter)
{
	/*
	 * The first version of 82542 had an errata where when link was forced it
	 * would stay up even up even if the cable was disconnected. Sequence errors
	 * were used to detect the disconnect and then the driver would unforce the link.
	 * This code in the in the ISR. For this to work correctly the Sequence error
	 * interrupt had to be enabled all the time.
	 */

	if (adapter->hw.mac_type == em_82542_rev2_0)
		E1000_WRITE_REG(&adapter->hw, IMC,
				(0xffffffff & ~E1000_IMC_RXSEQ));
	else
		E1000_WRITE_REG(&adapter->hw, IMC,
				0xffffffff);
	return;
}

/* Reject multicast/broadcast (low bit of first octet) and all-zero
 * addresses; returns TRUE for a usable unicast MAC. */
static int
em_is_valid_ether_addr(u_int8_t *addr)
{
	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
		return (FALSE);
	}

	return(TRUE);
}

/* 16-bit PCI config space write, used by the shared em_hw code. */
void
em_write_pci_cfg(struct em_hw *hw,
		 uint32_t reg,
		 uint16_t *value)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev, reg,
			 *value, 2);
}

/* 16-bit PCI config space read, used by the shared em_hw code. */
void
em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
		uint16_t *value)
{
	*value = pci_read_config(((struct em_osdep *)hw->back)->dev,
				 reg, 2);
	return;
}

/* Set the Memory Write Invalidate bit in the PCI command word. */
void
em_pci_set_mwi(struct em_hw *hw)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev,
			 PCIR_COMMAND,
			 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
	return;
}

/* Clear the Memory Write Invalidate bit in the PCI command word. */
void
em_pci_clear_mwi(struct em_hw *hw)
{
	pci_write_config(((struct em_osdep *)hw->back)->dev,
			 PCIR_COMMAND,
			 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
	return;
}

/* Raw x86 I/O-port read used by the shared em_hw code. */
uint32_t
em_io_read(struct em_hw *hw, unsigned long port)
{
	return(inl(port));
}

/* Raw x86 I/O-port write used by the shared em_hw code. */
void
em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
{
	outl(port, value);
	return;
}

/*********************************************************************
* 82544 Coexistence issue workaround.
*    There are 2 issues.
*       1. Transmit Hang issue.
*    To detect this issue, following equation can be used...
*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
*          If SUM[3:0] is in between 1 to 4, we will have this issue.
*
*       2. DAC issue.
*    To detect this issue, following equation can be used...
*          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3067* If SUM[3:0] is in between 9 to c, we will have this issue. 3068* 3069* 3070* WORKAROUND: 3071* Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC) 3072* 3073*** *********************************************************************/ 3074static u_int32_t 3075em_fill_descriptors (u_int64_t address, 3076 u_int32_t length, 3077 PDESC_ARRAY desc_array) 3078{ 3079 /* Since issue is sensitive to length and address.*/ 3080 /* Let us first check the address...*/ 3081 u_int32_t safe_terminator; 3082 if (length <= 4) { 3083 desc_array->descriptor[0].address = address; 3084 desc_array->descriptor[0].length = length; 3085 desc_array->elements = 1; 3086 return desc_array->elements; 3087 } 3088 safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF); 3089 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */ 3090 if (safe_terminator == 0 || 3091 (safe_terminator > 4 && 3092 safe_terminator < 9) || 3093 (safe_terminator > 0xC && 3094 safe_terminator <= 0xF)) { 3095 desc_array->descriptor[0].address = address; 3096 desc_array->descriptor[0].length = length; 3097 desc_array->elements = 1; 3098 return desc_array->elements; 3099 } 3100 3101 desc_array->descriptor[0].address = address; 3102 desc_array->descriptor[0].length = length - 4; 3103 desc_array->descriptor[1].address = address + (length - 4); 3104 desc_array->descriptor[1].length = 4; 3105 desc_array->elements = 2; 3106 return desc_array->elements; 3107} 3108 3109/********************************************************************** 3110 * 3111 * Update the board statistics counters. 
3112 * 3113 **********************************************************************/ 3114static void 3115em_update_stats_counters(struct adapter *adapter) 3116{ 3117 struct ifnet *ifp; 3118 3119 if(adapter->hw.media_type == em_media_type_copper || 3120 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 3121 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS); 3122 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC); 3123 } 3124 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS); 3125 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC); 3126 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC); 3127 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL); 3128 3129 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC); 3130 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL); 3131 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC); 3132 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC); 3133 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC); 3134 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC); 3135 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC); 3136 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC); 3137 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC); 3138 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC); 3139 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64); 3140 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127); 3141 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255); 3142 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511); 3143 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023); 3144 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522); 3145 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC); 3146 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC); 3147 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC); 3148 
adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC); 3149 3150 /* For the 64-bit byte counters the low dword must be read first. */ 3151 /* Both registers clear on the read of the high dword */ 3152 3153 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 3154 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH); 3155 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL); 3156 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH); 3157 3158 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC); 3159 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC); 3160 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC); 3161 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC); 3162 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC); 3163 3164 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL); 3165 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH); 3166 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL); 3167 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH); 3168 3169 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR); 3170 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT); 3171 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64); 3172 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127); 3173 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255); 3174 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511); 3175 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023); 3176 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522); 3177 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC); 3178 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC); 3179 3180 if (adapter->hw.mac_type >= em_82543) { 3181 adapter->stats.algnerrc += 3182 E1000_READ_REG(&adapter->hw, ALGNERRC); 3183 adapter->stats.rxerrc += 3184 E1000_READ_REG(&adapter->hw, RXERRC); 3185 adapter->stats.tncrs += 3186 E1000_READ_REG(&adapter->hw, TNCRS); 3187 
adapter->stats.cexterr += 3188 E1000_READ_REG(&adapter->hw, CEXTERR); 3189 adapter->stats.tsctc += 3190 E1000_READ_REG(&adapter->hw, TSCTC); 3191 adapter->stats.tsctfc += 3192 E1000_READ_REG(&adapter->hw, TSCTFC); 3193 } 3194 ifp = adapter->ifp; 3195 3196 /* Fill out the OS statistics structure */ 3197 ifp->if_ibytes = adapter->stats.gorcl; 3198 ifp->if_obytes = adapter->stats.gotcl; 3199 ifp->if_imcasts = adapter->stats.mprc; 3200 ifp->if_collisions = adapter->stats.colc; 3201 3202 /* Rx Errors */ 3203 ifp->if_ierrors = 3204 adapter->dropped_pkts + 3205 adapter->stats.rxerrc + 3206 adapter->stats.crcerrs + 3207 adapter->stats.algnerrc + 3208 adapter->stats.rlec + 3209 adapter->stats.mpc + adapter->stats.cexterr; 3210 3211 /* Tx Errors */ 3212 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol; 3213 3214} 3215 3216 3217/********************************************************************** 3218 * 3219 * This routine is called only when em_display_debug_stats is enabled. 3220 * This routine provides a way to take a look at important statistics 3221 * maintained by the driver and hardware. 
3222 * 3223 **********************************************************************/ 3224static void 3225em_print_debug_info(struct adapter *adapter) 3226{ 3227 int unit = adapter->unit; 3228 uint8_t *hw_addr = adapter->hw.hw_addr; 3229 3230 printf("em%d: Adapter hardware address = %p \n", unit, hw_addr); 3231 printf("em%d:CTRL = 0x%x\n", unit, 3232 E1000_READ_REG(&adapter->hw, CTRL)); 3233 printf("em%d:RCTL = 0x%x PS=(0x8402)\n", unit, 3234 E1000_READ_REG(&adapter->hw, RCTL)); 3235 printf("em%d:tx_int_delay = %d, tx_abs_int_delay = %d\n", unit, 3236 E1000_READ_REG(&adapter->hw, TIDV), 3237 E1000_READ_REG(&adapter->hw, TADV)); 3238 printf("em%d:rx_int_delay = %d, rx_abs_int_delay = %d\n", unit, 3239 E1000_READ_REG(&adapter->hw, RDTR), 3240 E1000_READ_REG(&adapter->hw, RADV)); 3241 printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit, 3242 (long long)adapter->tx_fifo_wrk_cnt, 3243 (long long)adapter->tx_fifo_reset_cnt); 3244 printf("em%d: hw tdh = %d, hw tdt = %d\n", unit, 3245 E1000_READ_REG(&adapter->hw, TDH), 3246 E1000_READ_REG(&adapter->hw, TDT)); 3247 printf("em%d: Num Tx descriptors avail = %d\n", unit, 3248 adapter->num_tx_desc_avail); 3249 printf("em%d: Tx Descriptors not avail1 = %ld\n", unit, 3250 adapter->no_tx_desc_avail1); 3251 printf("em%d: Tx Descriptors not avail2 = %ld\n", unit, 3252 adapter->no_tx_desc_avail2); 3253 printf("em%d: Std mbuf failed = %ld\n", unit, 3254 adapter->mbuf_alloc_failed); 3255 printf("em%d: Std mbuf cluster failed = %ld\n", unit, 3256 adapter->mbuf_cluster_failed); 3257 printf("em%d: Driver dropped packets = %ld\n", unit, 3258 adapter->dropped_pkts); 3259 3260 return; 3261} 3262 3263static void 3264em_print_hw_stats(struct adapter *adapter) 3265{ 3266 int unit = adapter->unit; 3267 3268 printf("em%d: Excessive collisions = %lld\n", unit, 3269 (long long)adapter->stats.ecol); 3270 printf("em%d: Symbol errors = %lld\n", unit, 3271 (long long)adapter->stats.symerrs); 3272 printf("em%d: Sequence errors = %lld\n", 
unit, 3273 (long long)adapter->stats.sec); 3274 printf("em%d: Defer count = %lld\n", unit, 3275 (long long)adapter->stats.dc); 3276 3277 printf("em%d: Missed Packets = %lld\n", unit, 3278 (long long)adapter->stats.mpc); 3279 printf("em%d: Receive No Buffers = %lld\n", unit, 3280 (long long)adapter->stats.rnbc); 3281 printf("em%d: Receive length errors = %lld\n", unit, 3282 (long long)adapter->stats.rlec); 3283 printf("em%d: Receive errors = %lld\n", unit, 3284 (long long)adapter->stats.rxerrc); 3285 printf("em%d: Crc errors = %lld\n", unit, 3286 (long long)adapter->stats.crcerrs); 3287 printf("em%d: Alignment errors = %lld\n", unit, 3288 (long long)adapter->stats.algnerrc); 3289 printf("em%d: Carrier extension errors = %lld\n", unit, 3290 (long long)adapter->stats.cexterr); 3291 3292 printf("em%d: XON Rcvd = %lld\n", unit, 3293 (long long)adapter->stats.xonrxc); 3294 printf("em%d: XON Xmtd = %lld\n", unit, 3295 (long long)adapter->stats.xontxc); 3296 printf("em%d: XOFF Rcvd = %lld\n", unit, 3297 (long long)adapter->stats.xoffrxc); 3298 printf("em%d: XOFF Xmtd = %lld\n", unit, 3299 (long long)adapter->stats.xofftxc); 3300 3301 printf("em%d: Good Packets Rcvd = %lld\n", unit, 3302 (long long)adapter->stats.gprc); 3303 printf("em%d: Good Packets Xmtd = %lld\n", unit, 3304 (long long)adapter->stats.gptc); 3305 3306 return; 3307} 3308 3309static int 3310em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3311{ 3312 int error; 3313 int result; 3314 struct adapter *adapter; 3315 3316 result = -1; 3317 error = sysctl_handle_int(oidp, &result, 0, req); 3318 3319 if (error || !req->newptr) 3320 return (error); 3321 3322 if (result == 1) { 3323 adapter = (struct adapter *)arg1; 3324 em_print_debug_info(adapter); 3325 } 3326 3327 return error; 3328} 3329 3330 3331static int 3332em_sysctl_stats(SYSCTL_HANDLER_ARGS) 3333{ 3334 int error; 3335 int result; 3336 struct adapter *adapter; 3337 3338 result = -1; 3339 error = sysctl_handle_int(oidp, &result, 0, req); 3340 3341 if (error || 
!req->newptr) 3342 return (error); 3343 3344 if (result == 1) { 3345 adapter = (struct adapter *)arg1; 3346 em_print_hw_stats(adapter); 3347 } 3348 3349 return error; 3350} 3351 3352static int 3353em_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 3354{ 3355 struct em_int_delay_info *info; 3356 struct adapter *adapter; 3357 u_int32_t regval; 3358 int error; 3359 int usecs; 3360 int ticks; 3361 int s; 3362 3363 info = (struct em_int_delay_info *)arg1; 3364 adapter = info->adapter; 3365 usecs = info->value; 3366 error = sysctl_handle_int(oidp, &usecs, 0, req); 3367 if (error != 0 || req->newptr == NULL) 3368 return error; 3369 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535)) 3370 return EINVAL; 3371 info->value = usecs; 3372 ticks = E1000_USECS_TO_TICKS(usecs); 3373 3374 s = splimp(); 3375 regval = E1000_READ_OFFSET(&adapter->hw, info->offset); 3376 regval = (regval & ~0xffff) | (ticks & 0xffff); 3377 /* Handle a few special cases. */ 3378 switch (info->offset) { 3379 case E1000_RDTR: 3380 case E1000_82542_RDTR: 3381 regval |= E1000_RDT_FPDB; 3382 break; 3383 case E1000_TIDV: 3384 case E1000_82542_TIDV: 3385 if (ticks == 0) { 3386 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; 3387 /* Don't write 0 into the TIDV register. */ 3388 regval++; 3389 } else 3390 adapter->txd_cmd |= E1000_TXD_CMD_IDE; 3391 break; 3392 } 3393 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); 3394 splx(s); 3395 return 0; 3396} 3397 3398static void 3399em_add_int_delay_sysctl(struct adapter *adapter, const char *name, 3400 const char *description, struct em_int_delay_info *info, 3401 int offset, int value) 3402{ 3403 info->adapter = adapter; 3404 info->offset = offset; 3405 info->value = value; 3406 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), 3407 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), 3408 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, 3409 info, 0, em_sysctl_int_delay, "I", description); 3410} 3411