/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
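/* Note (illustrative, relying on the INTEL_E1000_ETHERNET_DEVICE macro from
 * e1000.h): each table entry above expands to a struct pci_device_id such as
 *	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1000) }
 * and MODULE_DEVICE_TABLE() exports the table as module aliases, so that
 * hotplug/udev can autoload this driver when a matching Intel (vendor
 * 0x8086) device appears.
 */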
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
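/* Usage note (illustrative): because copybreak is registered with mode 0644,
 * it can be set at load time or changed at run time, e.g.
 *	modprobe e1000 copybreak=128
 *	echo 0 > /sys/module/e1000/parameters/copybreak
 * A value of 0 disables the copy entirely; packets larger than the limit are
 * always handed up in their original receive buffer.
 */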
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return the net_device backing a hardware struct
 * @hw: pointer to the HW structure
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (hw->mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else {
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}

			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		} else {
			adapter->mng_vlan_id = vid;
		}
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}
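/* Ring-occupancy note (illustrative; the E1000_DESC_UNUSED() macro itself
 * lives in e1000.h): with count == 256, next_to_clean == 5 and
 * next_to_use == 10, E1000_DESC_UNUSED evaluates to 256 + 5 - 10 - 1 = 250,
 * so one descriptor always stays unused and next_to_use can never catch up
 * to next_to_clean on a full ring.
 */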
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others).
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}
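/* The bit manipulated above and below is the standard IEEE MII
 * control-register power-down bit (MII_CR_POWER_DOWN, bit 11 of PHY_CTRL):
 * e1000_power_up_phy() clears it, e1000_power_down_phy() sets it, and the
 * PHY keeps its other settings across the power-down/up cycle.
 */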
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
out:
	return;
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * timers and tasks from rescheduling.
	 */
	set_bit(__E1000_DOWN, &adapter->flags);

	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
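/* Worked example for the FIFO repartitioning in e1000_reset() below
 * (illustrative numbers): with a 9018-byte max frame and 16-byte legacy Tx
 * descriptors,
 *	min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 (KB)
 *	min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB)
 * so when the Tx share of the PBA is smaller than min_tx_space, the deficit
 * is taken from the Rx allocation, subject to the alignment constraints and
 * the Rx minimum handled below.
 */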
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition the PBA for jumbo (greater than 9k) MTU.
	 * To take effect, CTRL.RST is required.
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* the upper 16 bits hold the Tx packet buffer allocation
		 * size in KB */
		tx_space = pba >> 16;
		/* the lower 16 bits hold the Rx packet buffer allocation
		 * size in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * packet; don't include the Ethernet FCS because hardware
		 * appends it */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
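/* EEPROM checksum convention (as relied on below and in
 * e1000_validate_eeprom_checksum()): the 16-bit little-endian words from
 * offset 0 up to and including EEPROM_CHECKSUM_REG must sum to EEPROM_SUM
 * (0xBABA), so the stored checksum word is EEPROM_SUM minus the sum of all
 * preceding words; that difference is exactly what csum_new recomputes.
 */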
/**
 * e1000_dump_eeprom - dump EEPROM contents for users having checksum issues
 * @adapter: board private structure
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data) {
		pr_err("Unable to allocate memory to dump EEPROM data\n");
		return;
	}

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};
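/* For orientation (illustrative): these callbacks are what the networking
 * core invokes on administrative events, e.g. "ip link set eth0 up" reaches
 * e1000_open() via ndo_open, "ip link set eth0 mtu 9000" reaches
 * e1000_change_mtu() via ndo_change_mtu, and a transmit from the stack
 * enters at e1000_xmit_frame() via ndo_start_xmit.
 */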
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				pr_err("No usable DMA config, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = (1 << debug) - 1;
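	/* For illustration: with the default debug level
	 * (NETIF_MSG_DRV | NETIF_MSG_PROBE == 3), msg_enable becomes
	 * (1 << 3) - 1 == 0x7, i.e. NETIF_MSG_DRV, NETIF_MSG_PROBE and
	 * NETIF_MSG_LINK messages are enabled.
	 */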
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

	if (hw->mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to a bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");

	e1000_get_bus_info(hw);

	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = 1;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
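	/* For illustration: eeprom_wol computed above is only the default;
	 * assuming the ethtool ops installed via e1000_set_ethtool_ops()
	 * expose get_wol/set_wol (as they do in this driver), the user can
	 * still override it at run time, e.g. "ethtool -s eth0 wol g" to
	 * enable Magic Packet wake or "ethtool -s eth0 wol d" to disable it.
	 */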
	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	set_bit(__E1000_DOWN, &adapter->flags);
	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);

	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
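	/* For a standard 1500-byte MTU this works out to
	 * 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes, the classic
	 * maximum Ethernet frame size.
	 */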
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */
	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross a 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross a 64k boundary due to errata 23 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_82546)
		return ((begin ^ (end - 1)) >> 16) == 0;

	return true;
}
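/* For illustration: begin = 0x2f000 with len = 0x2000 gives end - 1 = 0x30fff,
 * and (0x2f000 ^ 0x30fff) >> 16 == 1, so the buffer straddles a 64 KiB
 * boundary and the check above rejects it (on errata-23-affected parts only).
 */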
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;

	ew32(TCTL, tctl);
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	ew32(RCTL, rctl);
}
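/* Interrupt throttling note for e1000_configure_rx() below (illustrative
 * arithmetic): the ITR register counts in 256 ns units and adapter->itr is a
 * rate in interrupts/sec, so writing 1000000000 / (itr * 256) converts the
 * rate into an interval. E.g. itr == 8000 gives
 * 1000000000 / (8000 * 256) = 488 units, i.e. ~125 us between interrupts.
 */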
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl);
}

/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
buffer_info->skb = NULL; 1853 } 1854 buffer_info->time_stamp = 0; 1855 /* buffer_info must be completely set up in the transmit path */ 1856} 1857 1858/** 1859 * e1000_clean_tx_ring - Free Tx Buffers 1860 * @adapter: board private structure 1861 * @tx_ring: ring to be cleaned 1862 **/ 1863 1864static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1865 struct e1000_tx_ring *tx_ring) 1866{ 1867 struct e1000_hw *hw = &adapter->hw; 1868 struct e1000_buffer *buffer_info; 1869 unsigned long size; 1870 unsigned int i; 1871 1872 /* Free all the Tx ring sk_buffs */ 1873 1874 for (i = 0; i < tx_ring->count; i++) { 1875 buffer_info = &tx_ring->buffer_info[i]; 1876 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1877 } 1878 1879 size = sizeof(struct e1000_buffer) * tx_ring->count; 1880 memset(tx_ring->buffer_info, 0, size); 1881 1882 /* Zero out the descriptor ring */ 1883 1884 memset(tx_ring->desc, 0, tx_ring->size); 1885 1886 tx_ring->next_to_use = 0; 1887 tx_ring->next_to_clean = 0; 1888 tx_ring->last_tx_tso = 0; 1889 1890 writel(0, hw->hw_addr + tx_ring->tdh); 1891 writel(0, hw->hw_addr + tx_ring->tdt); 1892} 1893 1894/** 1895 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 1896 * @adapter: board private structure 1897 **/ 1898 1899static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 1900{ 1901 int i; 1902 1903 for (i = 0; i < adapter->num_tx_queues; i++) 1904 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1905} 1906 1907/** 1908 * e1000_free_rx_resources - Free Rx Resources 1909 * @adapter: board private structure 1910 * @rx_ring: ring to clean the resources from 1911 * 1912 * Free all receive software resources 1913 **/ 1914 1915static void e1000_free_rx_resources(struct e1000_adapter *adapter, 1916 struct e1000_rx_ring *rx_ring) 1917{ 1918 struct pci_dev *pdev = adapter->pdev; 1919 1920 e1000_clean_rx_ring(adapter, rx_ring); 1921 1922 vfree(rx_ring->buffer_info); 1923 rx_ring->buffer_info = NULL; 1924 1925 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 1926 rx_ring->dma); 1927 1928 rx_ring->desc = NULL; 1929} 1930 1931/** 1932 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 1933 * @adapter: board private structure 1934 * 1935 * Free all receive software resources 1936 **/ 1937 1938void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 1939{ 1940 int i; 1941 1942 for (i = 0; i < adapter->num_rx_queues; i++) 1943 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 1944} 1945 1946/** 1947 * e1000_clean_rx_ring - Free Rx Buffers per Queue 1948 * @adapter: board private structure 1949 * @rx_ring: ring to free buffers from 1950 **/ 1951 1952static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 1953 struct e1000_rx_ring *rx_ring) 1954{ 1955 struct e1000_hw *hw = &adapter->hw; 1956 struct e1000_buffer *buffer_info; 1957 struct pci_dev *pdev = adapter->pdev; 1958 unsigned long size; 1959 unsigned int i; 1960 1961 /* Free all the Rx ring sk_buffs */ 1962 for (i = 0; i < rx_ring->count; i++) { 1963 buffer_info = &rx_ring->buffer_info[i]; 1964 if (buffer_info->dma && 1965 adapter->clean_rx == e1000_clean_rx_irq) { 1966 dma_unmap_single(&pdev->dev, buffer_info->dma, 1967 buffer_info->length, 1968 DMA_FROM_DEVICE); 1969 } else if (buffer_info->dma && 1970 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 1971 dma_unmap_page(&pdev->dev, buffer_info->dma, 1972 buffer_info->length, 1973 DMA_FROM_DEVICE); 1974 } 1975 1976 buffer_info->dma = 0; 1977 if (buffer_info->page) { 1978 put_page(buffer_info->page); 1979 
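			/* put_page() drops the reference taken by
			 * alloc_page() in e1000_alloc_jumbo_rx_buffers();
			 * the ring is being torn down here, so nothing is
			 * recycled. */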
buffer_info->page = NULL; 1980 } 1981 if (buffer_info->skb) { 1982 dev_kfree_skb(buffer_info->skb); 1983 buffer_info->skb = NULL; 1984 } 1985 } 1986 1987 /* there also may be some cached data from a chained receive */ 1988 if (rx_ring->rx_skb_top) { 1989 dev_kfree_skb(rx_ring->rx_skb_top); 1990 rx_ring->rx_skb_top = NULL; 1991 } 1992 1993 size = sizeof(struct e1000_buffer) * rx_ring->count; 1994 memset(rx_ring->buffer_info, 0, size); 1995 1996 /* Zero out the descriptor ring */ 1997 memset(rx_ring->desc, 0, rx_ring->size); 1998 1999 rx_ring->next_to_clean = 0; 2000 rx_ring->next_to_use = 0; 2001 2002 writel(0, hw->hw_addr + rx_ring->rdh); 2003 writel(0, hw->hw_addr + rx_ring->rdt); 2004} 2005 2006/** 2007 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2008 * @adapter: board private structure 2009 **/ 2010 2011static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2012{ 2013 int i; 2014 2015 for (i = 0; i < adapter->num_rx_queues; i++) 2016 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2017} 2018 2019/* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2020 * and memory write and invalidate disabled for certain operations 2021 */ 2022static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2023{ 2024 struct e1000_hw *hw = &adapter->hw; 2025 struct net_device *netdev = adapter->netdev; 2026 u32 rctl; 2027 2028 e1000_pci_clear_mwi(hw); 2029 2030 rctl = er32(RCTL); 2031 rctl |= E1000_RCTL_RST; 2032 ew32(RCTL, rctl); 2033 E1000_WRITE_FLUSH(); 2034 mdelay(5); 2035 2036 if (netif_running(netdev)) 2037 e1000_clean_all_rx_rings(adapter); 2038} 2039 2040static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2041{ 2042 struct e1000_hw *hw = &adapter->hw; 2043 struct net_device *netdev = adapter->netdev; 2044 u32 rctl; 2045 2046 rctl = er32(RCTL); 2047 rctl &= ~E1000_RCTL_RST; 2048 ew32(RCTL, rctl); 2049 E1000_WRITE_FLUSH(); 2050 mdelay(5); 2051 2052 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2053 e1000_pci_set_mwi(hw); 2054 2055 if (netif_running(netdev)) { 2056 /* No need to loop, because 82542 supports only 1 queue */ 2057 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2058 e1000_configure_rx(adapter); 2059 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2060 } 2061} 2062 2063/** 2064 * e1000_set_mac - Change the Ethernet Address of the NIC 2065 * @netdev: network interface device structure 2066 * @p: pointer to an address structure 2067 * 2068 * Returns 0 on success, negative on failure 2069 **/ 2070 2071static int e1000_set_mac(struct net_device *netdev, void *p) 2072{ 2073 struct e1000_adapter *adapter = netdev_priv(netdev); 2074 struct e1000_hw *hw = &adapter->hw; 2075 struct sockaddr *addr = p; 2076 2077 if (!is_valid_ether_addr(addr->sa_data)) 2078 return -EADDRNOTAVAIL; 2079 2080 /* 82542 2.0 needs to be in reset to write receive address registers */ 2081 2082 if (hw->mac_type == e1000_82542_rev2_0) 2083 e1000_enter_82542_rst(adapter); 2084 2085 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2086 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2087 2088 e1000_rar_set(hw, hw->mac_addr, 0); 2089 2090 if (hw->mac_type == e1000_82542_rev2_0) 2091 e1000_leave_82542_rst(adapter); 2092 2093 return 0; 2094} 2095 2096/** 2097 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2098 * @netdev: network interface device structure 2099 * 2100 * The set_rx_mode entry point is called whenever the unicast or multicast 2101 * address lists or the network interface flags are 
updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (adapter->vlgrp)
			rctl |= E1000_RCTL_VFE;
	}

	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* Load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address; if there are fewer
	 * than 14 addresses, go ahead and clear the remaining filters.
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* Write the hash table completely. Writing from the bottom copes
	 * with write-combining chipsets and avoids flushing each write. */
	for (i = mta_reg_count - 1; i >= 0; i--) {
		/* The 82544 has an erratum where writing an odd MTA offset
		 * can overwrite the previous even offset; writing backwards
		 * over the range avoids it, because the odd offset is
		 * always written first.
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */

static void e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
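	/* This timer is armed by the watchdog roughly two seconds after a
	 * link transition (see the mod_timer() calls on phy_info_timer
	 * below), which gives autonegotiation results time to settle
	 * before the PHY registers are snapshotted. */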
e1000_phy_get_info(hw, &adapter->phy_info); 2217} 2218 2219/** 2220 * e1000_82547_tx_fifo_stall - Timer Call-back 2221 * @data: pointer to adapter cast into an unsigned long 2222 **/ 2223 2224static void e1000_82547_tx_fifo_stall(unsigned long data) 2225{ 2226 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 2227 struct e1000_hw *hw = &adapter->hw; 2228 struct net_device *netdev = adapter->netdev; 2229 u32 tctl; 2230 2231 if (atomic_read(&adapter->tx_fifo_stall)) { 2232 if ((er32(TDT) == er32(TDH)) && 2233 (er32(TDFT) == er32(TDFH)) && 2234 (er32(TDFTS) == er32(TDFHS))) { 2235 tctl = er32(TCTL); 2236 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2237 ew32(TDFT, adapter->tx_head_addr); 2238 ew32(TDFH, adapter->tx_head_addr); 2239 ew32(TDFTS, adapter->tx_head_addr); 2240 ew32(TDFHS, adapter->tx_head_addr); 2241 ew32(TCTL, tctl); 2242 E1000_WRITE_FLUSH(); 2243 2244 adapter->tx_fifo_head = 0; 2245 atomic_set(&adapter->tx_fifo_stall, 0); 2246 netif_wake_queue(netdev); 2247 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2248 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 2249 } 2250 } 2251} 2252 2253bool e1000_has_link(struct e1000_adapter *adapter) 2254{ 2255 struct e1000_hw *hw = &adapter->hw; 2256 bool link_active = false; 2257 2258 /* get_link_status is set on LSC (link status) interrupt or 2259 * rx sequence error interrupt. get_link_status will stay 2260 * false until the e1000_check_for_link establishes link 2261 * for copper adapters ONLY 2262 */ 2263 switch (hw->media_type) { 2264 case e1000_media_type_copper: 2265 if (hw->get_link_status) { 2266 e1000_check_for_link(hw); 2267 link_active = !hw->get_link_status; 2268 } else { 2269 link_active = true; 2270 } 2271 break; 2272 case e1000_media_type_fiber: 2273 e1000_check_for_link(hw); 2274 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2275 break; 2276 case e1000_media_type_internal_serdes: 2277 e1000_check_for_link(hw); 2278 link_active = hw->serdes_has_link; 2279 break; 2280 default: 2281 break; 2282 } 2283 2284 return link_active; 2285} 2286 2287/** 2288 * e1000_watchdog - Timer Call-back 2289 * @data: pointer to adapter cast into an unsigned long 2290 **/ 2291static void e1000_watchdog(unsigned long data) 2292{ 2293 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 2294 struct e1000_hw *hw = &adapter->hw; 2295 struct net_device *netdev = adapter->netdev; 2296 struct e1000_tx_ring *txdr = adapter->tx_ring; 2297 u32 link, tctl; 2298 2299 link = e1000_has_link(adapter); 2300 if ((netif_carrier_ok(netdev)) && link) 2301 goto link_up; 2302 2303 if (link) { 2304 if (!netif_carrier_ok(netdev)) { 2305 u32 ctrl; 2306 bool txb2b = true; 2307 /* update snapshot of PHY registers on LSC */ 2308 e1000_get_speed_and_duplex(hw, 2309 &adapter->link_speed, 2310 &adapter->link_duplex); 2311 2312 ctrl = er32(CTRL); 2313 pr_info("%s NIC Link is Up %d Mbps %s, " 2314 "Flow Control: %s\n", 2315 netdev->name, 2316 adapter->link_speed, 2317 adapter->link_duplex == FULL_DUPLEX ? 2318 "Full Duplex" : "Half Duplex", 2319 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2320 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2321 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2322 E1000_CTRL_TFCE) ? "TX" : "None"))); 2323 2324 /* adjust timeout factor according to speed/duplex */ 2325 adapter->tx_timeout_factor = 1; 2326 switch (adapter->link_speed) { 2327 case SPEED_10: 2328 txb2b = false; 2329 adapter->tx_timeout_factor = 16; 2330 break; 2331 case SPEED_100: 2332 txb2b = false; 2333 /* maybe add some timeout factor ? 
*/ 2334 break; 2335 } 2336 2337 /* enable transmits in the hardware */ 2338 tctl = er32(TCTL); 2339 tctl |= E1000_TCTL_EN; 2340 ew32(TCTL, tctl); 2341 2342 netif_carrier_on(netdev); 2343 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2344 mod_timer(&adapter->phy_info_timer, 2345 round_jiffies(jiffies + 2 * HZ)); 2346 adapter->smartspeed = 0; 2347 } 2348 } else { 2349 if (netif_carrier_ok(netdev)) { 2350 adapter->link_speed = 0; 2351 adapter->link_duplex = 0; 2352 pr_info("%s NIC Link is Down\n", 2353 netdev->name); 2354 netif_carrier_off(netdev); 2355 2356 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2357 mod_timer(&adapter->phy_info_timer, 2358 round_jiffies(jiffies + 2 * HZ)); 2359 } 2360 2361 e1000_smartspeed(adapter); 2362 } 2363 2364link_up: 2365 e1000_update_stats(adapter); 2366 2367 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2368 adapter->tpt_old = adapter->stats.tpt; 2369 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2370 adapter->colc_old = adapter->stats.colc; 2371 2372 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2373 adapter->gorcl_old = adapter->stats.gorcl; 2374 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2375 adapter->gotcl_old = adapter->stats.gotcl; 2376 2377 e1000_update_adaptive(hw); 2378 2379 if (!netif_carrier_ok(netdev)) { 2380 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2381 /* We've lost link, so the controller stops DMA, 2382 * but we've got queued Tx work that's never going 2383 * to get done, so reset controller to flush Tx. 2384 * (Do the reset outside of interrupt context). */ 2385 adapter->tx_timeout_count++; 2386 schedule_work(&adapter->reset_task); 2387 /* return immediately since reset is imminent */ 2388 return; 2389 } 2390 } 2391 2392 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2393 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2394 /* 2395 * Symmetric Tx/Rx gets a reduced ITR=2000; 2396 * Total asymmetrical Tx or Rx gets ITR=8000; 2397 * everyone else is between 2000-8000. 2398 */ 2399 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2400 u32 dif = (adapter->gotcl > adapter->gorcl ? 2401 adapter->gotcl - adapter->gorcl : 2402 adapter->gorcl - adapter->gotcl) / 10000; 2403 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2404 2405 ew32(ITR, 1000000000 / (itr * 256)); 2406 } 2407 2408 /* Cause software interrupt to ensure rx ring is cleaned */ 2409 ew32(ICS, E1000_ICS_RXDMT0); 2410 2411 /* Force detection of hung controller every watchdog period */ 2412 adapter->detect_tx_hung = true; 2413 2414 /* Reset the timer */ 2415 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2416 mod_timer(&adapter->watchdog_timer, 2417 round_jiffies(jiffies + 2 * HZ)); 2418} 2419 2420enum latency_range { 2421 lowest_latency = 0, 2422 low_latency = 1, 2423 bulk_latency = 2, 2424 latency_invalid = 255 2425}; 2426 2427/** 2428 * e1000_update_itr - update the dynamic ITR value based on statistics 2429 * @adapter: pointer to adapter 2430 * @itr_setting: current adapter->itr 2431 * @packets: the number of packets during this measurement interval 2432 * @bytes: the number of bytes during this measurement interval 2433 * 2434 * Stores a new ITR value based on packets and byte 2435 * counts during the last interrupt. The advantage of per interrupt 2436 * computation is faster updates and more accurate ITR for the current 2437 * traffic pattern. 
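 * As a point of reference for the conversions used here: the ITR
 * register is programmed in 256 ns units, so a target of N
 * interrupts/sec is written as 1000000000 / (N * 256); e.g. N = 8000
 * yields a register value of roughly 488.
 *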
Constants in this function were computed 2438 * based on theoretical maximum wire speed and thresholds were set based 2439 * on testing data as well as attempting to minimize response time 2440 * while increasing bulk throughput. 2441 * this functionality is controlled by the InterruptThrottleRate module 2442 * parameter (see e1000_param.c) 2443 **/ 2444static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2445 u16 itr_setting, int packets, int bytes) 2446{ 2447 unsigned int retval = itr_setting; 2448 struct e1000_hw *hw = &adapter->hw; 2449 2450 if (unlikely(hw->mac_type < e1000_82540)) 2451 goto update_itr_done; 2452 2453 if (packets == 0) 2454 goto update_itr_done; 2455 2456 switch (itr_setting) { 2457 case lowest_latency: 2458 /* jumbo frames get bulk treatment*/ 2459 if (bytes/packets > 8000) 2460 retval = bulk_latency; 2461 else if ((packets < 5) && (bytes > 512)) 2462 retval = low_latency; 2463 break; 2464 case low_latency: /* 50 usec aka 20000 ints/s */ 2465 if (bytes > 10000) { 2466 /* jumbo frames need bulk latency setting */ 2467 if (bytes/packets > 8000) 2468 retval = bulk_latency; 2469 else if ((packets < 10) || ((bytes/packets) > 1200)) 2470 retval = bulk_latency; 2471 else if ((packets > 35)) 2472 retval = lowest_latency; 2473 } else if (bytes/packets > 2000) 2474 retval = bulk_latency; 2475 else if (packets <= 2 && bytes < 512) 2476 retval = lowest_latency; 2477 break; 2478 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2479 if (bytes > 25000) { 2480 if (packets > 35) 2481 retval = low_latency; 2482 } else if (bytes < 6000) { 2483 retval = low_latency; 2484 } 2485 break; 2486 } 2487 2488update_itr_done: 2489 return retval; 2490} 2491 2492static void e1000_set_itr(struct e1000_adapter *adapter) 2493{ 2494 struct e1000_hw *hw = &adapter->hw; 2495 u16 current_itr; 2496 u32 new_itr = adapter->itr; 2497 2498 if (unlikely(hw->mac_type < e1000_82540)) 2499 return; 2500 2501 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2502 if (unlikely(adapter->link_speed != SPEED_1000)) { 2503 current_itr = 0; 2504 new_itr = 4000; 2505 goto set_itr_now; 2506 } 2507 2508 adapter->tx_itr = e1000_update_itr(adapter, 2509 adapter->tx_itr, 2510 adapter->total_tx_packets, 2511 adapter->total_tx_bytes); 2512 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2513 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2514 adapter->tx_itr = low_latency; 2515 2516 adapter->rx_itr = e1000_update_itr(adapter, 2517 adapter->rx_itr, 2518 adapter->total_rx_packets, 2519 adapter->total_rx_bytes); 2520 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2521 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2522 adapter->rx_itr = low_latency; 2523 2524 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2525 2526 switch (current_itr) { 2527 /* counts and packets in update_itr are dependent on these numbers */ 2528 case lowest_latency: 2529 new_itr = 70000; 2530 break; 2531 case low_latency: 2532 new_itr = 20000; /* aka hwitr = ~200 */ 2533 break; 2534 case bulk_latency: 2535 new_itr = 4000; 2536 break; 2537 default: 2538 break; 2539 } 2540 2541set_itr_now: 2542 if (new_itr != adapter->itr) { 2543 /* this attempts to bias the interrupt rate towards Bulk 2544 * by adding intermediate steps when interrupt rate is 2545 * increasing */ 2546 new_itr = new_itr > adapter->itr ? 
2547 min(adapter->itr + (new_itr >> 2), new_itr) : 2548 new_itr; 2549 adapter->itr = new_itr; 2550 ew32(ITR, 1000000000 / (new_itr * 256)); 2551 } 2552} 2553 2554#define E1000_TX_FLAGS_CSUM 0x00000001 2555#define E1000_TX_FLAGS_VLAN 0x00000002 2556#define E1000_TX_FLAGS_TSO 0x00000004 2557#define E1000_TX_FLAGS_IPV4 0x00000008 2558#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2559#define E1000_TX_FLAGS_VLAN_SHIFT 16 2560 2561static int e1000_tso(struct e1000_adapter *adapter, 2562 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2563{ 2564 struct e1000_context_desc *context_desc; 2565 struct e1000_buffer *buffer_info; 2566 unsigned int i; 2567 u32 cmd_length = 0; 2568 u16 ipcse = 0, tucse, mss; 2569 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2570 int err; 2571 2572 if (skb_is_gso(skb)) { 2573 if (skb_header_cloned(skb)) { 2574 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2575 if (err) 2576 return err; 2577 } 2578 2579 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2580 mss = skb_shinfo(skb)->gso_size; 2581 if (skb->protocol == htons(ETH_P_IP)) { 2582 struct iphdr *iph = ip_hdr(skb); 2583 iph->tot_len = 0; 2584 iph->check = 0; 2585 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2586 iph->daddr, 0, 2587 IPPROTO_TCP, 2588 0); 2589 cmd_length = E1000_TXD_CMD_IP; 2590 ipcse = skb_transport_offset(skb) - 1; 2591 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2592 ipv6_hdr(skb)->payload_len = 0; 2593 tcp_hdr(skb)->check = 2594 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2595 &ipv6_hdr(skb)->daddr, 2596 0, IPPROTO_TCP, 0); 2597 ipcse = 0; 2598 } 2599 ipcss = skb_network_offset(skb); 2600 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2601 tucss = skb_transport_offset(skb); 2602 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2603 tucse = 0; 2604 2605 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2606 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2607 2608 i = tx_ring->next_to_use; 2609 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2610 buffer_info = &tx_ring->buffer_info[i]; 2611 2612 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2613 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2614 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2615 context_desc->upper_setup.tcp_fields.tucss = tucss; 2616 context_desc->upper_setup.tcp_fields.tucso = tucso; 2617 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2618 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2619 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2620 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2621 2622 buffer_info->time_stamp = jiffies; 2623 buffer_info->next_to_watch = i; 2624 2625 if (++i == tx_ring->count) i = 0; 2626 tx_ring->next_to_use = i; 2627 2628 return true; 2629 } 2630 return false; 2631} 2632 2633static bool e1000_tx_csum(struct e1000_adapter *adapter, 2634 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2635{ 2636 struct e1000_context_desc *context_desc; 2637 struct e1000_buffer *buffer_info; 2638 unsigned int i; 2639 u8 css; 2640 u32 cmd_len = E1000_TXD_CMD_DEXT; 2641 2642 if (skb->ip_summed != CHECKSUM_PARTIAL) 2643 return false; 2644 2645 switch (skb->protocol) { 2646 case cpu_to_be16(ETH_P_IP): 2647 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2648 cmd_len |= E1000_TXD_CMD_TCP; 2649 break; 2650 case cpu_to_be16(ETH_P_IPV6): 2651 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2652 cmd_len |= E1000_TXD_CMD_TCP; 2653 break; 2654 default: 2655 if (unlikely(net_ratelimit())) 2656 e_warn(drv, "checksum_partial 
proto=%x!\n", 2657 skb->protocol); 2658 break; 2659 } 2660 2661 css = skb_transport_offset(skb); 2662 2663 i = tx_ring->next_to_use; 2664 buffer_info = &tx_ring->buffer_info[i]; 2665 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2666 2667 context_desc->lower_setup.ip_config = 0; 2668 context_desc->upper_setup.tcp_fields.tucss = css; 2669 context_desc->upper_setup.tcp_fields.tucso = 2670 css + skb->csum_offset; 2671 context_desc->upper_setup.tcp_fields.tucse = 0; 2672 context_desc->tcp_seg_setup.data = 0; 2673 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2674 2675 buffer_info->time_stamp = jiffies; 2676 buffer_info->next_to_watch = i; 2677 2678 if (unlikely(++i == tx_ring->count)) i = 0; 2679 tx_ring->next_to_use = i; 2680 2681 return true; 2682} 2683 2684#define E1000_MAX_TXD_PWR 12 2685#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2686 2687static int e1000_tx_map(struct e1000_adapter *adapter, 2688 struct e1000_tx_ring *tx_ring, 2689 struct sk_buff *skb, unsigned int first, 2690 unsigned int max_per_txd, unsigned int nr_frags, 2691 unsigned int mss) 2692{ 2693 struct e1000_hw *hw = &adapter->hw; 2694 struct pci_dev *pdev = adapter->pdev; 2695 struct e1000_buffer *buffer_info; 2696 unsigned int len = skb_headlen(skb); 2697 unsigned int offset = 0, size, count = 0, i; 2698 unsigned int f; 2699 2700 i = tx_ring->next_to_use; 2701 2702 while (len) { 2703 buffer_info = &tx_ring->buffer_info[i]; 2704 size = min(len, max_per_txd); 2705 if (!skb->data_len && tx_ring->last_tx_tso && 2706 !skb_is_gso(skb)) { 2707 tx_ring->last_tx_tso = 0; 2708 size -= 4; 2709 } 2710 2711 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2712 size -= 4; 2713 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2714 (size > 2015) && count == 0)) 2715 size = 2015; 2716 2717 if (unlikely(adapter->pcix_82544 && 2718 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2719 size > 4)) 2720 size -= 4; 2721 2722 buffer_info->length = size; 2723 /* set time_stamp *before* dma to help avoid a possible race */ 2724 buffer_info->time_stamp = jiffies; 2725 buffer_info->mapped_as_page = false; 2726 buffer_info->dma = dma_map_single(&pdev->dev, 2727 skb->data + offset, 2728 size, DMA_TO_DEVICE); 2729 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2730 goto dma_error; 2731 buffer_info->next_to_watch = i; 2732 2733 len -= size; 2734 offset += size; 2735 count++; 2736 if (len) { 2737 i++; 2738 if (unlikely(i == tx_ring->count)) 2739 i = 0; 2740 } 2741 } 2742 2743 for (f = 0; f < nr_frags; f++) { 2744 struct skb_frag_struct *frag; 2745 2746 frag = &skb_shinfo(skb)->frags[f]; 2747 len = frag->size; 2748 offset = frag->page_offset; 2749 2750 while (len) { 2751 i++; 2752 if (unlikely(i == tx_ring->count)) 2753 i = 0; 2754 2755 buffer_info = &tx_ring->buffer_info[i]; 2756 size = min(len, max_per_txd); 2757 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2758 size -= 4; 2759 if (unlikely(adapter->pcix_82544 && 2760 !((unsigned long)(page_to_phys(frag->page) + offset 2761 + size - 1) & 4) && 2762 size > 4)) 2763 size -= 4; 2764 2765 buffer_info->length = size; 2766 buffer_info->time_stamp = jiffies; 2767 buffer_info->mapped_as_page = true; 2768 buffer_info->dma = dma_map_page(&pdev->dev, frag->page, 2769 offset, size, 2770 DMA_TO_DEVICE); 2771 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2772 goto dma_error; 2773 buffer_info->next_to_watch = i; 2774 2775 len -= size; 2776 offset += size; 2777 count++; 2778 } 2779 } 2780 2781 tx_ring->buffer_info[i].skb = skb; 2782 
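	/* The skb pointer is stored only on the last descriptor of the
	 * frame, and first->next_to_watch records that last index so
	 * e1000_clean_tx_irq() can tell when the whole frame has been
	 * written back. */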
tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}

static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time; it synchronizes IO on IA64/Altix systems */
	mmiowb();
}


#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)
{
	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;

	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}

static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
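	 *
	 * The barrier pairs with the smp_mb() in e1000_clean_tx_irq():
	 * the stopped-queue state must be visible before the free
	 * descriptor count is re-read, or a concurrent clean could miss
	 * the wakeup.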
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow. Right now, performance is impacted slightly negatively
	 * if using multiple tx queues. If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	tx_ring = adapter->tx_ring;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer. The calc is:
	 * 4 = ceil(buffer len/mss). To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
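	 * For example, if mss drops to 536, the clamp below becomes
	 * min(536 << 2, 4096) = 2144, so no single buffer covers more
	 * than roughly four segments.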
*/ 2954 if (mss) { 2955 u8 hdr_len; 2956 max_per_txd = min(mss << 2, max_per_txd); 2957 max_txd_pwr = fls(max_per_txd) - 1; 2958 2959 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2960 if (skb->data_len && hdr_len == len) { 2961 switch (hw->mac_type) { 2962 unsigned int pull_size; 2963 case e1000_82544: 2964 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 2965 break; 2966 /* fall through */ 2967 pull_size = min((unsigned int)4, skb->data_len); 2968 if (!__pskb_pull_tail(skb, pull_size)) { 2969 e_err(drv, "__pskb_pull_tail " 2970 "failed.\n"); 2971 dev_kfree_skb_any(skb); 2972 return NETDEV_TX_OK; 2973 } 2974 len = skb_headlen(skb); 2975 break; 2976 default: 2977 /* do nothing */ 2978 break; 2979 } 2980 } 2981 } 2982 2983 /* reserve a descriptor for the offload context */ 2984 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 2985 count++; 2986 count++; 2987 2988 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 2989 count++; 2990 2991 count += TXD_USE_COUNT(len, max_txd_pwr); 2992 2993 if (adapter->pcix_82544) 2994 count++; 2995 2996 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2997 (len > 2015))) 2998 count++; 2999 3000 nr_frags = skb_shinfo(skb)->nr_frags; 3001 for (f = 0; f < nr_frags; f++) 3002 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 3003 max_txd_pwr); 3004 if (adapter->pcix_82544) 3005 count += nr_frags; 3006 3007 /* need: count + 2 desc gap to keep tail from touching 3008 * head, otherwise try next time */ 3009 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3010 return NETDEV_TX_BUSY; 3011 3012 if (unlikely(hw->mac_type == e1000_82547)) { 3013 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 3014 netif_stop_queue(netdev); 3015 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3016 mod_timer(&adapter->tx_fifo_stall_timer, 3017 jiffies + 1); 3018 return NETDEV_TX_BUSY; 3019 } 3020 } 3021 3022 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 3023 tx_flags |= E1000_TX_FLAGS_VLAN; 3024 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3025 } 3026 3027 first = tx_ring->next_to_use; 3028 3029 tso = e1000_tso(adapter, tx_ring, skb); 3030 if (tso < 0) { 3031 dev_kfree_skb_any(skb); 3032 return NETDEV_TX_OK; 3033 } 3034 3035 if (likely(tso)) { 3036 if (likely(hw->mac_type != e1000_82544)) 3037 tx_ring->last_tx_tso = 1; 3038 tx_flags |= E1000_TX_FLAGS_TSO; 3039 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3040 tx_flags |= E1000_TX_FLAGS_CSUM; 3041 3042 if (likely(skb->protocol == htons(ETH_P_IP))) 3043 tx_flags |= E1000_TX_FLAGS_IPV4; 3044 3045 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3046 nr_frags, mss); 3047 3048 if (count) { 3049 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3050 /* Make sure there is space in the ring for the next send. 
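	 * MAX_SKB_FRAGS + 2 is roughly the worst case for one more frame:
	 * one descriptor per page fragment plus the linear data and a
	 * context descriptor.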
*/ 3051 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3052 3053 } else { 3054 dev_kfree_skb_any(skb); 3055 tx_ring->buffer_info[first].time_stamp = 0; 3056 tx_ring->next_to_use = first; 3057 } 3058 3059 return NETDEV_TX_OK; 3060} 3061 3062/** 3063 * e1000_tx_timeout - Respond to a Tx Hang 3064 * @netdev: network interface device structure 3065 **/ 3066 3067static void e1000_tx_timeout(struct net_device *netdev) 3068{ 3069 struct e1000_adapter *adapter = netdev_priv(netdev); 3070 3071 /* Do the reset outside of interrupt context */ 3072 adapter->tx_timeout_count++; 3073 schedule_work(&adapter->reset_task); 3074} 3075 3076static void e1000_reset_task(struct work_struct *work) 3077{ 3078 struct e1000_adapter *adapter = 3079 container_of(work, struct e1000_adapter, reset_task); 3080 3081 e1000_reinit_locked(adapter); 3082} 3083 3084/** 3085 * e1000_get_stats - Get System Network Statistics 3086 * @netdev: network interface device structure 3087 * 3088 * Returns the address of the device statistics structure. 3089 * The statistics are actually updated from the timer callback. 3090 **/ 3091 3092static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3093{ 3094 /* only return the current stats */ 3095 return &netdev->stats; 3096} 3097 3098/** 3099 * e1000_change_mtu - Change the Maximum Transfer Unit 3100 * @netdev: network interface device structure 3101 * @new_mtu: new value for maximum frame size 3102 * 3103 * Returns 0 on success, negative on failure 3104 **/ 3105 3106static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3107{ 3108 struct e1000_adapter *adapter = netdev_priv(netdev); 3109 struct e1000_hw *hw = &adapter->hw; 3110 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3111 3112 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3113 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3114 e_err(probe, "Invalid MTU setting\n"); 3115 return -EINVAL; 3116 } 3117 3118 /* Adapter-specific max frame size limits. */ 3119 switch (hw->mac_type) { 3120 case e1000_undefined ... e1000_82542_rev2_1: 3121 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3122 e_err(probe, "Jumbo Frames not supported.\n"); 3123 return -EINVAL; 3124 } 3125 break; 3126 default: 3127 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3128 break; 3129 } 3130 3131 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3132 msleep(1); 3133 /* e1000_down has a dependency on max_frame_size */ 3134 hw->max_frame_size = max_frame; 3135 if (netif_running(netdev)) 3136 e1000_down(adapter); 3137 3138 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3139 * means we reserve 2 more, this pushes us to allocate from the next 3140 * larger slab size. 3141 * i.e. 
RXBUFFER_2048 --> size-4096 slab 3142 * however with the new *_jumbo_rx* routines, jumbo receives will use 3143 * fragmented skbs */ 3144 3145 if (max_frame <= E1000_RXBUFFER_2048) 3146 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3147 else 3148#if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3149 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3150#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3151 adapter->rx_buffer_len = PAGE_SIZE; 3152#endif 3153 3154 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3155 if (!hw->tbi_compatibility_on && 3156 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3157 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3158 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3159 3160 pr_info("%s changing MTU from %d to %d\n", 3161 netdev->name, netdev->mtu, new_mtu); 3162 netdev->mtu = new_mtu; 3163 3164 if (netif_running(netdev)) 3165 e1000_up(adapter); 3166 else 3167 e1000_reset(adapter); 3168 3169 clear_bit(__E1000_RESETTING, &adapter->flags); 3170 3171 return 0; 3172} 3173 3174/** 3175 * e1000_update_stats - Update the board statistics counters 3176 * @adapter: board private structure 3177 **/ 3178 3179void e1000_update_stats(struct e1000_adapter *adapter) 3180{ 3181 struct net_device *netdev = adapter->netdev; 3182 struct e1000_hw *hw = &adapter->hw; 3183 struct pci_dev *pdev = adapter->pdev; 3184 unsigned long flags; 3185 u16 phy_tmp; 3186 3187#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3188 3189 /* 3190 * Prevent stats update while adapter is being reset, or if the pci 3191 * connection is down. 3192 */ 3193 if (adapter->link_speed == 0) 3194 return; 3195 if (pci_channel_offline(pdev)) 3196 return; 3197 3198 spin_lock_irqsave(&adapter->stats_lock, flags); 3199 3200 /* these counters are modified from e1000_tbi_adjust_stats, 3201 * called from the interrupt context, so they must only 3202 * be written while holding adapter->stats_lock 3203 */ 3204 3205 adapter->stats.crcerrs += er32(CRCERRS); 3206 adapter->stats.gprc += er32(GPRC); 3207 adapter->stats.gorcl += er32(GORCL); 3208 adapter->stats.gorch += er32(GORCH); 3209 adapter->stats.bprc += er32(BPRC); 3210 adapter->stats.mprc += er32(MPRC); 3211 adapter->stats.roc += er32(ROC); 3212 3213 adapter->stats.prc64 += er32(PRC64); 3214 adapter->stats.prc127 += er32(PRC127); 3215 adapter->stats.prc255 += er32(PRC255); 3216 adapter->stats.prc511 += er32(PRC511); 3217 adapter->stats.prc1023 += er32(PRC1023); 3218 adapter->stats.prc1522 += er32(PRC1522); 3219 3220 adapter->stats.symerrs += er32(SYMERRS); 3221 adapter->stats.mpc += er32(MPC); 3222 adapter->stats.scc += er32(SCC); 3223 adapter->stats.ecol += er32(ECOL); 3224 adapter->stats.mcc += er32(MCC); 3225 adapter->stats.latecol += er32(LATECOL); 3226 adapter->stats.dc += er32(DC); 3227 adapter->stats.sec += er32(SEC); 3228 adapter->stats.rlec += er32(RLEC); 3229 adapter->stats.xonrxc += er32(XONRXC); 3230 adapter->stats.xontxc += er32(XONTXC); 3231 adapter->stats.xoffrxc += er32(XOFFRXC); 3232 adapter->stats.xofftxc += er32(XOFFTXC); 3233 adapter->stats.fcruc += er32(FCRUC); 3234 adapter->stats.gptc += er32(GPTC); 3235 adapter->stats.gotcl += er32(GOTCL); 3236 adapter->stats.gotch += er32(GOTCH); 3237 adapter->stats.rnbc += er32(RNBC); 3238 adapter->stats.ruc += er32(RUC); 3239 adapter->stats.rfc += er32(RFC); 3240 adapter->stats.rjc += er32(RJC); 3241 adapter->stats.torl += er32(TORL); 3242 adapter->stats.torh += er32(TORH); 3243 adapter->stats.totl += er32(TOTL); 3244 adapter->stats.toth += er32(TOTH); 3245 adapter->stats.tpr += er32(TPR); 3246 3247 
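	/* Note: these hardware statistics registers are clear-on-read,
	 * which is why each sample is accumulated into the running
	 * software counters rather than assigned. */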
adapter->stats.ptc64 += er32(PTC64); 3248 adapter->stats.ptc127 += er32(PTC127); 3249 adapter->stats.ptc255 += er32(PTC255); 3250 adapter->stats.ptc511 += er32(PTC511); 3251 adapter->stats.ptc1023 += er32(PTC1023); 3252 adapter->stats.ptc1522 += er32(PTC1522); 3253 3254 adapter->stats.mptc += er32(MPTC); 3255 adapter->stats.bptc += er32(BPTC); 3256 3257 /* used for adaptive IFS */ 3258 3259 hw->tx_packet_delta = er32(TPT); 3260 adapter->stats.tpt += hw->tx_packet_delta; 3261 hw->collision_delta = er32(COLC); 3262 adapter->stats.colc += hw->collision_delta; 3263 3264 if (hw->mac_type >= e1000_82543) { 3265 adapter->stats.algnerrc += er32(ALGNERRC); 3266 adapter->stats.rxerrc += er32(RXERRC); 3267 adapter->stats.tncrs += er32(TNCRS); 3268 adapter->stats.cexterr += er32(CEXTERR); 3269 adapter->stats.tsctc += er32(TSCTC); 3270 adapter->stats.tsctfc += er32(TSCTFC); 3271 } 3272 3273 /* Fill out the OS statistics structure */ 3274 netdev->stats.multicast = adapter->stats.mprc; 3275 netdev->stats.collisions = adapter->stats.colc; 3276 3277 /* Rx Errors */ 3278 3279 /* RLEC on some newer hardware can be incorrect so build 3280 * our own version based on RUC and ROC */ 3281 netdev->stats.rx_errors = adapter->stats.rxerrc + 3282 adapter->stats.crcerrs + adapter->stats.algnerrc + 3283 adapter->stats.ruc + adapter->stats.roc + 3284 adapter->stats.cexterr; 3285 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3286 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3287 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3288 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3289 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3290 3291 /* Tx Errors */ 3292 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3293 netdev->stats.tx_errors = adapter->stats.txerrc; 3294 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3295 netdev->stats.tx_window_errors = adapter->stats.latecol; 3296 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3297 if (hw->bad_tx_carr_stats_fd && 3298 adapter->link_duplex == FULL_DUPLEX) { 3299 netdev->stats.tx_carrier_errors = 0; 3300 adapter->stats.tncrs = 0; 3301 } 3302 3303 /* Tx Dropped needs to be maintained elsewhere */ 3304 3305 /* Phy Stats */ 3306 if (hw->media_type == e1000_media_type_copper) { 3307 if ((adapter->link_speed == SPEED_1000) && 3308 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3309 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3310 adapter->phy_stats.idle_errors += phy_tmp; 3311 } 3312 3313 if ((hw->mac_type <= e1000_82546) && 3314 (hw->phy_type == e1000_phy_m88) && 3315 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3316 adapter->phy_stats.receive_errors += phy_tmp; 3317 } 3318 3319 /* Management Stats */ 3320 if (hw->has_smbus) { 3321 adapter->stats.mgptc += er32(MGTPTC); 3322 adapter->stats.mgprc += er32(MGTPRC); 3323 adapter->stats.mgpdc += er32(MGTPDC); 3324 } 3325 3326 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3327} 3328 3329/** 3330 * e1000_intr - Interrupt Handler 3331 * @irq: interrupt number 3332 * @data: pointer to a network interface device structure 3333 **/ 3334 3335static irqreturn_t e1000_intr(int irq, void *data) 3336{ 3337 struct net_device *netdev = data; 3338 struct e1000_adapter *adapter = netdev_priv(netdev); 3339 struct e1000_hw *hw = &adapter->hw; 3340 u32 icr = er32(ICR); 3341 3342 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) 3343 return IRQ_NONE; /* Not our interrupt */ 3344 3345 if (unlikely(icr & (E1000_ICR_RXSEQ | 
E1000_ICR_LSC))) { 3346 hw->get_link_status = 1; 3347 /* guard against interrupt when we're going down */ 3348 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3349 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3350 } 3351 3352 /* disable interrupts, without the synchronize_irq bit */ 3353 ew32(IMC, ~0); 3354 E1000_WRITE_FLUSH(); 3355 3356 if (likely(napi_schedule_prep(&adapter->napi))) { 3357 adapter->total_tx_bytes = 0; 3358 adapter->total_tx_packets = 0; 3359 adapter->total_rx_bytes = 0; 3360 adapter->total_rx_packets = 0; 3361 __napi_schedule(&adapter->napi); 3362 } else { 3363 /* this really should not happen! if it does it is basically a 3364 * bug, but not a hard error, so enable ints and continue */ 3365 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3366 e1000_irq_enable(adapter); 3367 } 3368 3369 return IRQ_HANDLED; 3370} 3371 3372/** 3373 * e1000_clean - NAPI Rx polling callback 3374 * @adapter: board private structure 3375 **/ 3376static int e1000_clean(struct napi_struct *napi, int budget) 3377{ 3378 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3379 int tx_clean_complete = 0, work_done = 0; 3380 3381 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3382 3383 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3384 3385 if (!tx_clean_complete) 3386 work_done = budget; 3387 3388 /* If budget not fully consumed, exit the polling mode */ 3389 if (work_done < budget) { 3390 if (likely(adapter->itr_setting & 3)) 3391 e1000_set_itr(adapter); 3392 napi_complete(napi); 3393 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3394 e1000_irq_enable(adapter); 3395 } 3396 3397 return work_done; 3398} 3399 3400/** 3401 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3402 * @adapter: board private structure 3403 **/ 3404static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3405 struct e1000_tx_ring *tx_ring) 3406{ 3407 struct e1000_hw *hw = &adapter->hw; 3408 struct net_device *netdev = adapter->netdev; 3409 struct e1000_tx_desc *tx_desc, *eop_desc; 3410 struct e1000_buffer *buffer_info; 3411 unsigned int i, eop; 3412 unsigned int count = 0; 3413 unsigned int total_tx_bytes=0, total_tx_packets=0; 3414 3415 i = tx_ring->next_to_clean; 3416 eop = tx_ring->buffer_info[i].next_to_watch; 3417 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3418 3419 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3420 (count < tx_ring->count)) { 3421 bool cleaned = false; 3422 rmb(); /* read buffer_info after eop_desc */ 3423 for ( ; !cleaned; count++) { 3424 tx_desc = E1000_TX_DESC(*tx_ring, i); 3425 buffer_info = &tx_ring->buffer_info[i]; 3426 cleaned = (i == eop); 3427 3428 if (cleaned) { 3429 struct sk_buff *skb = buffer_info->skb; 3430 unsigned int segs, bytecount; 3431 segs = skb_shinfo(skb)->gso_segs ?: 1; 3432 /* multiply data chunks by size of headers */ 3433 bytecount = ((segs - 1) * skb_headlen(skb)) + 3434 skb->len; 3435 total_tx_packets += segs; 3436 total_tx_bytes += bytecount; 3437 } 3438 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3439 tx_desc->upper.data = 0; 3440 3441 if (unlikely(++i == tx_ring->count)) i = 0; 3442 } 3443 3444 eop = tx_ring->buffer_info[i].next_to_watch; 3445 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3446 } 3447 3448 tx_ring->next_to_clean = i; 3449 3450#define TX_WAKE_THRESHOLD 32 3451 if (unlikely(count && netif_carrier_ok(netdev) && 3452 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3453 /* Make sure that anybody stopping the queue after this 3454 * sees 
the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)(tx_ring - adapter->tx_ring),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return (count < tx_ring->count);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/

static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter
*adapter, u8 status, 3564 __le16 vlan, struct sk_buff *skb) 3565{ 3566 if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) { 3567 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3568 le16_to_cpu(vlan) & 3569 E1000_RXD_SPC_VLAN_MASK); 3570 } else { 3571 netif_receive_skb(skb); 3572 } 3573} 3574 3575/** 3576 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 3577 * @adapter: board private structure 3578 * @rx_ring: ring to clean 3579 * @work_done: amount of napi work completed this call 3580 * @work_to_do: max amount of work allowed for this call to do 3581 * 3582 * the return value indicates whether actual cleaning was done, there 3583 * is no guarantee that everything was cleaned 3584 */ 3585static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 3586 struct e1000_rx_ring *rx_ring, 3587 int *work_done, int work_to_do) 3588{ 3589 struct e1000_hw *hw = &adapter->hw; 3590 struct net_device *netdev = adapter->netdev; 3591 struct pci_dev *pdev = adapter->pdev; 3592 struct e1000_rx_desc *rx_desc, *next_rxd; 3593 struct e1000_buffer *buffer_info, *next_buffer; 3594 unsigned long irq_flags; 3595 u32 length; 3596 unsigned int i; 3597 int cleaned_count = 0; 3598 bool cleaned = false; 3599 unsigned int total_rx_bytes=0, total_rx_packets=0; 3600 3601 i = rx_ring->next_to_clean; 3602 rx_desc = E1000_RX_DESC(*rx_ring, i); 3603 buffer_info = &rx_ring->buffer_info[i]; 3604 3605 while (rx_desc->status & E1000_RXD_STAT_DD) { 3606 struct sk_buff *skb; 3607 u8 status; 3608 3609 if (*work_done >= work_to_do) 3610 break; 3611 (*work_done)++; 3612 rmb(); /* read descriptor and rx_buffer_info after status DD */ 3613 3614 status = rx_desc->status; 3615 skb = buffer_info->skb; 3616 buffer_info->skb = NULL; 3617 3618 if (++i == rx_ring->count) i = 0; 3619 next_rxd = E1000_RX_DESC(*rx_ring, i); 3620 prefetch(next_rxd); 3621 3622 next_buffer = &rx_ring->buffer_info[i]; 3623 3624 cleaned = true; 3625 cleaned_count++; 3626 dma_unmap_page(&pdev->dev, buffer_info->dma, 3627 buffer_info->length, DMA_FROM_DEVICE); 3628 buffer_info->dma = 0; 3629 3630 length = le16_to_cpu(rx_desc->length); 3631 3632 /* errors is only valid for DD + EOP descriptors */ 3633 if (unlikely((status & E1000_RXD_STAT_EOP) && 3634 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 3635 u8 last_byte = *(skb->data + length - 1); 3636 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 3637 last_byte)) { 3638 spin_lock_irqsave(&adapter->stats_lock, 3639 irq_flags); 3640 e1000_tbi_adjust_stats(hw, &adapter->stats, 3641 length, skb->data); 3642 spin_unlock_irqrestore(&adapter->stats_lock, 3643 irq_flags); 3644 length--; 3645 } else { 3646 /* recycle both page and skb */ 3647 buffer_info->skb = skb; 3648 /* an error means any chain goes out the window 3649 * too */ 3650 if (rx_ring->rx_skb_top) 3651 dev_kfree_skb(rx_ring->rx_skb_top); 3652 rx_ring->rx_skb_top = NULL; 3653 goto next_desc; 3654 } 3655 } 3656 3657#define rxtop rx_ring->rx_skb_top 3658 if (!(status & E1000_RXD_STAT_EOP)) { 3659 /* this descriptor is only the beginning (or middle) */ 3660 if (!rxtop) { 3661 /* this is the beginning of a chain */ 3662 rxtop = skb; 3663 skb_fill_page_desc(rxtop, 0, buffer_info->page, 3664 0, length); 3665 } else { 3666 /* this is the middle of a chain */ 3667 skb_fill_page_desc(rxtop, 3668 skb_shinfo(rxtop)->nr_frags, 3669 buffer_info->page, 0, length); 3670 /* re-use the skb, only consumed the page */ 3671 buffer_info->skb = skb; 3672 } 3673 e1000_consume_page(buffer_info, rxtop, length); 3674 goto next_desc; 3675 
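			/* Non-EOP descriptors only add their page to the
			 * rx_skb_top chain; nothing is handed to the stack
			 * until the descriptor with EOP set arrives and is
			 * handled in the else branch below. */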
} else { 3676 if (rxtop) { 3677 /* end of the chain */ 3678 skb_fill_page_desc(rxtop, 3679 skb_shinfo(rxtop)->nr_frags, 3680 buffer_info->page, 0, length); 3681 /* re-use the current skb, we only consumed the 3682 * page */ 3683 buffer_info->skb = skb; 3684 skb = rxtop; 3685 rxtop = NULL; 3686 e1000_consume_page(buffer_info, skb, length); 3687 } else { 3688 /* no chain, got EOP, this buf is the packet 3689 * copybreak to save the put_page/alloc_page */ 3690 if (length <= copybreak && 3691 skb_tailroom(skb) >= length) { 3692 u8 *vaddr; 3693 vaddr = kmap_atomic(buffer_info->page, 3694 KM_SKB_DATA_SOFTIRQ); 3695 memcpy(skb_tail_pointer(skb), vaddr, length); 3696 kunmap_atomic(vaddr, 3697 KM_SKB_DATA_SOFTIRQ); 3698 /* re-use the page, so don't erase 3699 * buffer_info->page */ 3700 skb_put(skb, length); 3701 } else { 3702 skb_fill_page_desc(skb, 0, 3703 buffer_info->page, 0, 3704 length); 3705 e1000_consume_page(buffer_info, skb, 3706 length); 3707 } 3708 } 3709 } 3710 3711 e1000_rx_checksum(adapter, 3712 (u32)(status) | 3713 ((u32)(rx_desc->errors) << 24), 3714 le16_to_cpu(rx_desc->csum), skb); 3715 3716 pskb_trim(skb, skb->len - 4); 3717 3718 /* probably a little skewed due to removing CRC */ 3719 total_rx_bytes += skb->len; 3720 total_rx_packets++; 3721 3722 /* eth type trans needs skb->data to point to something */ 3723 if (!pskb_may_pull(skb, ETH_HLEN)) { 3724 e_err(drv, "pskb_may_pull failed.\n"); 3725 dev_kfree_skb(skb); 3726 goto next_desc; 3727 } 3728 3729 skb->protocol = eth_type_trans(skb, netdev); 3730 3731 e1000_receive_skb(adapter, status, rx_desc->special, skb); 3732 3733next_desc: 3734 rx_desc->status = 0; 3735 3736 /* return some buffers to hardware, one at a time is too slow */ 3737 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 3738 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 3739 cleaned_count = 0; 3740 } 3741 3742 /* use prefetched values */ 3743 rx_desc = next_rxd; 3744 buffer_info = next_buffer; 3745 } 3746 rx_ring->next_to_clean = i; 3747 3748 cleaned_count = E1000_DESC_UNUSED(rx_ring); 3749 if (cleaned_count) 3750 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 3751 3752 adapter->total_rx_packets += total_rx_packets; 3753 adapter->total_rx_bytes += total_rx_bytes; 3754 netdev->stats.rx_bytes += total_rx_bytes; 3755 netdev->stats.rx_packets += total_rx_packets; 3756 return cleaned; 3757} 3758 3759/* 3760 * this should improve performance for small packets with large amounts 3761 * of reassembly being done in the stack 3762 */ 3763static void e1000_check_copybreak(struct net_device *netdev, 3764 struct e1000_buffer *buffer_info, 3765 u32 length, struct sk_buff **skb) 3766{ 3767 struct sk_buff *new_skb; 3768 3769 if (length > copybreak) 3770 return; 3771 3772 new_skb = netdev_alloc_skb_ip_align(netdev, length); 3773 if (!new_skb) 3774 return; 3775 3776 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, 3777 (*skb)->data - NET_IP_ALIGN, 3778 length + NET_IP_ALIGN); 3779 /* save the skb in buffer_info as good */ 3780 buffer_info->skb = *skb; 3781 *skb = new_skb; 3782} 3783 3784/** 3785 * e1000_clean_rx_irq - Send received data up the network stack; legacy 3786 * @adapter: board private structure 3787 * @rx_ring: ring to clean 3788 * @work_done: amount of napi work completed this call 3789 * @work_to_do: max amount of work allowed for this call to do 3790 */ 3791static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 3792 struct e1000_rx_ring *rx_ring, 3793 int *work_done, int work_to_do) 3794{ 3795 struct e1000_hw *hw = &adapter->hw; 
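	/* Legacy receive path: each descriptor maps a single skb's linear
	 * buffer, unlike e1000_clean_jumbo_rx_irq() above, which chains
	 * page fragments across descriptors. */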
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		/* adjust length to remove the Ethernet CRC; this must be
		 * done after the TBI_ACCEPT workaround above */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		skb->protocol = eth_type_trans(skb, netdev);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
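/* Jumbo receive buffers pair a small skb with a full page: the hardware
 * DMAs packet data into the page (rx_desc->buffer_addr points at the page
 * mapping), while the small skb serves as the head that
 * e1000_clean_jumbo_rx_irq() above either copies short frames into or
 * attaches the pages to as fragments.
 */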
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16; /* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while (cleaned_count--) */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

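	/* Note that i is stepped back one below before being written to
	 * RDT: the tail register is left pointing at the last initialized
	 * descriptor rather than at next_to_use itself, which (together
	 * with E1000_DESC_UNUSED) keeps at least one slot unused; this is
	 * the usual way to keep a full ring distinguishable from an empty
	 * one.
	 */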
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
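/* SmartSpeed works around a known 1000BASE-T link-up failure mode: if the
 * PHY reports a master/slave configuration fault on consecutive checks,
 * the routine below clears (and later restores) CR_1000T_MS_ENABLE and
 * restarts autonegotiation so the link can come up, possibly downshifting
 * from gigabit on marginal (e.g. 2- or 3-pair) cabling.
 */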
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

/**
 * e1000_ioctl - handle device-specific ioctl calls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
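/* For reference, userspace reaches e1000_mii_ioctl() through the generic
 * MII ioctls.  A minimal (hypothetical) sequence reading PHY register 1
 * (MII_BMSR) through an AF_INET socket fd might look like:
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = 1;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result in mii->val_out
 */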
/**
 * e1000_mii_ioctl - perform MII register access on behalf of userspace
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the MII data
 * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	u16 spddplx;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
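/* The VLAN filter table (VFTA) is a 4096-bit vector spread across 128
 * 32-bit registers: bits [11:5] of the VLAN ID select the register and
 * bits [4:0] select the bit within it, as computed in the two routines
 * below.
 */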
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		e_err(probe, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		e_err(probe, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
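/* __e1000_shutdown programs the Wake Up Filter Control register (WUFC)
 * from adapter->wol, drops the link-change wakeup filter when the link is
 * already up, and reports through *enable_wake whether the caller should
 * arm PME before putting the device into D3.
 */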
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
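/* The three pci_error_handlers callbacks below are invoked in sequence by
 * the PCI error recovery core: error_detected() quiesces the device,
 * slot_reset() re-enables it after the bus has been reset, and resume()
 * restarts traffic once recovery is complete.
 */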
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */