1/******************************************************************************* 2 3 Intel PRO/1000 Linux driver 4 Copyright(c) 1999 - 2006 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27*******************************************************************************/ 28 29#include "e1000.h" 30#include <net/ip6_checksum.h> 31 32char e1000_driver_name[] = "e1000"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 34#ifndef CONFIG_E1000_NAPI 35#define DRIVERNAPI 36#else 37#define DRIVERNAPI "-NAPI" 38#endif 39#define DRV_VERSION "7.3.20-k2"DRIVERNAPI 40char e1000_driver_version[] = DRV_VERSION; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 42 43/* e1000_pci_tbl - PCI Device ID Table 44 * 45 * Last entry must be all 0s 46 * 47 * Macro expands to... 
48 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} 49 */ 50static struct pci_device_id e1000_pci_tbl[] = { 51 INTEL_E1000_ETHERNET_DEVICE(0x1000), 52 INTEL_E1000_ETHERNET_DEVICE(0x1001), 53 INTEL_E1000_ETHERNET_DEVICE(0x1004), 54 INTEL_E1000_ETHERNET_DEVICE(0x1008), 55 INTEL_E1000_ETHERNET_DEVICE(0x1009), 56 INTEL_E1000_ETHERNET_DEVICE(0x100C), 57 INTEL_E1000_ETHERNET_DEVICE(0x100D), 58 INTEL_E1000_ETHERNET_DEVICE(0x100E), 59 INTEL_E1000_ETHERNET_DEVICE(0x100F), 60 INTEL_E1000_ETHERNET_DEVICE(0x1010), 61 INTEL_E1000_ETHERNET_DEVICE(0x1011), 62 INTEL_E1000_ETHERNET_DEVICE(0x1012), 63 INTEL_E1000_ETHERNET_DEVICE(0x1013), 64 INTEL_E1000_ETHERNET_DEVICE(0x1014), 65 INTEL_E1000_ETHERNET_DEVICE(0x1015), 66 INTEL_E1000_ETHERNET_DEVICE(0x1016), 67 INTEL_E1000_ETHERNET_DEVICE(0x1017), 68 INTEL_E1000_ETHERNET_DEVICE(0x1018), 69 INTEL_E1000_ETHERNET_DEVICE(0x1019), 70 INTEL_E1000_ETHERNET_DEVICE(0x101A), 71 INTEL_E1000_ETHERNET_DEVICE(0x101D), 72 INTEL_E1000_ETHERNET_DEVICE(0x101E), 73 INTEL_E1000_ETHERNET_DEVICE(0x1026), 74 INTEL_E1000_ETHERNET_DEVICE(0x1027), 75 INTEL_E1000_ETHERNET_DEVICE(0x1028), 76 INTEL_E1000_ETHERNET_DEVICE(0x1049), 77 INTEL_E1000_ETHERNET_DEVICE(0x104A), 78 INTEL_E1000_ETHERNET_DEVICE(0x104B), 79 INTEL_E1000_ETHERNET_DEVICE(0x104C), 80 INTEL_E1000_ETHERNET_DEVICE(0x104D), 81 INTEL_E1000_ETHERNET_DEVICE(0x105E), 82 INTEL_E1000_ETHERNET_DEVICE(0x105F), 83 INTEL_E1000_ETHERNET_DEVICE(0x1060), 84 INTEL_E1000_ETHERNET_DEVICE(0x1075), 85 INTEL_E1000_ETHERNET_DEVICE(0x1076), 86 INTEL_E1000_ETHERNET_DEVICE(0x1077), 87 INTEL_E1000_ETHERNET_DEVICE(0x1078), 88 INTEL_E1000_ETHERNET_DEVICE(0x1079), 89 INTEL_E1000_ETHERNET_DEVICE(0x107A), 90 INTEL_E1000_ETHERNET_DEVICE(0x107B), 91 INTEL_E1000_ETHERNET_DEVICE(0x107C), 92 INTEL_E1000_ETHERNET_DEVICE(0x107D), 93 INTEL_E1000_ETHERNET_DEVICE(0x107E), 94 INTEL_E1000_ETHERNET_DEVICE(0x107F), 95 INTEL_E1000_ETHERNET_DEVICE(0x108A), 96 INTEL_E1000_ETHERNET_DEVICE(0x108B), 97 INTEL_E1000_ETHERNET_DEVICE(0x108C), 98 
INTEL_E1000_ETHERNET_DEVICE(0x1096), 99 INTEL_E1000_ETHERNET_DEVICE(0x1098), 100 INTEL_E1000_ETHERNET_DEVICE(0x1099), 101 INTEL_E1000_ETHERNET_DEVICE(0x109A), 102 INTEL_E1000_ETHERNET_DEVICE(0x10A4), 103 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 104 INTEL_E1000_ETHERNET_DEVICE(0x10B9), 105 INTEL_E1000_ETHERNET_DEVICE(0x10BA), 106 INTEL_E1000_ETHERNET_DEVICE(0x10BB), 107 INTEL_E1000_ETHERNET_DEVICE(0x10BC), 108 INTEL_E1000_ETHERNET_DEVICE(0x10C4), 109 INTEL_E1000_ETHERNET_DEVICE(0x10C5), 110 /* required last entry */ 111 {0,} 112}; 113 114MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 115 116int e1000_up(struct e1000_adapter *adapter); 117void e1000_down(struct e1000_adapter *adapter); 118void e1000_reinit_locked(struct e1000_adapter *adapter); 119void e1000_reset(struct e1000_adapter *adapter); 120int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); 121int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 122int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 123void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 124void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 125static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 126 struct e1000_tx_ring *txdr); 127static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 128 struct e1000_rx_ring *rxdr); 129static void e1000_free_tx_resources(struct e1000_adapter *adapter, 130 struct e1000_tx_ring *tx_ring); 131static void e1000_free_rx_resources(struct e1000_adapter *adapter, 132 struct e1000_rx_ring *rx_ring); 133void e1000_update_stats(struct e1000_adapter *adapter); 134 135static int e1000_init_module(void); 136static void e1000_exit_module(void); 137static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 138static void __devexit e1000_remove(struct pci_dev *pdev); 139static int e1000_alloc_queues(struct e1000_adapter *adapter); 140static int e1000_sw_init(struct e1000_adapter *adapter); 141static int 
e1000_open(struct net_device *netdev); 142static int e1000_close(struct net_device *netdev); 143static void e1000_configure_tx(struct e1000_adapter *adapter); 144static void e1000_configure_rx(struct e1000_adapter *adapter); 145static void e1000_setup_rctl(struct e1000_adapter *adapter); 146static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); 147static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); 148static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 149 struct e1000_tx_ring *tx_ring); 150static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 151 struct e1000_rx_ring *rx_ring); 152static void e1000_set_multi(struct net_device *netdev); 153static void e1000_update_phy_info(unsigned long data); 154static void e1000_watchdog(unsigned long data); 155static void e1000_82547_tx_fifo_stall(unsigned long data); 156static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 157static struct net_device_stats * e1000_get_stats(struct net_device *netdev); 158static int e1000_change_mtu(struct net_device *netdev, int new_mtu); 159static int e1000_set_mac(struct net_device *netdev, void *p); 160static irqreturn_t e1000_intr(int irq, void *data); 161static irqreturn_t e1000_intr_msi(int irq, void *data); 162static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, 163 struct e1000_tx_ring *tx_ring); 164#ifdef CONFIG_E1000_NAPI 165static int e1000_clean(struct net_device *poll_dev, int *budget); 166static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 167 struct e1000_rx_ring *rx_ring, 168 int *work_done, int work_to_do); 169static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 170 struct e1000_rx_ring *rx_ring, 171 int *work_done, int work_to_do); 172#else 173static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 174 struct e1000_rx_ring *rx_ring); 175static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 176 struct e1000_rx_ring *rx_ring); 
177#endif 178static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 179 struct e1000_rx_ring *rx_ring, 180 int cleaned_count); 181static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 182 struct e1000_rx_ring *rx_ring, 183 int cleaned_count); 184static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 185static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 186 int cmd); 187void e1000_set_ethtool_ops(struct net_device *netdev); 188static void e1000_enter_82542_rst(struct e1000_adapter *adapter); 189static void e1000_leave_82542_rst(struct e1000_adapter *adapter); 190static void e1000_tx_timeout(struct net_device *dev); 191static void e1000_reset_task(struct work_struct *work); 192static void e1000_smartspeed(struct e1000_adapter *adapter); 193static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 194 struct sk_buff *skb); 195 196static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); 197static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 198static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 199static void e1000_restore_vlan(struct e1000_adapter *adapter); 200 201static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); 202#ifdef CONFIG_PM 203static int e1000_resume(struct pci_dev *pdev); 204#endif 205static void e1000_shutdown(struct pci_dev *pdev); 206 207#ifdef CONFIG_NET_POLL_CONTROLLER 208/* for netdump / net console */ 209static void e1000_netpoll (struct net_device *netdev); 210#endif 211 212extern void e1000_check_options(struct e1000_adapter *adapter); 213 214#define COPYBREAK_DEFAULT 256 215static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; 216module_param(copybreak, uint, 0644); 217MODULE_PARM_DESC(copybreak, 218 "Maximum size of packet that is copied to a new buffer on receive"); 219 220static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, 221 
						    pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

/* PCI error recovery callbacks, invoked by the PCI core on bus errors */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* default message level: driver and probe status messages only */
static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
e1000_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       e1000_driver_string, e1000_driver_version);

	printk(KERN_INFO "%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	/* log any non-default copybreak setting chosen via module parameter */
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			printk(KERN_INFO "e1000: copybreak disabled\n");
		else
			printk(KERN_INFO "e1000: copybreak enabled for "
			       "packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
288 **/ 289 290static void __exit 291e1000_exit_module(void) 292{ 293 pci_unregister_driver(&e1000_driver); 294} 295 296module_exit(e1000_exit_module); 297 298static int e1000_request_irq(struct e1000_adapter *adapter) 299{ 300 struct net_device *netdev = adapter->netdev; 301 void (*handler) = &e1000_intr; 302 int irq_flags = IRQF_SHARED; 303 int err; 304 305 if (adapter->hw.mac_type >= e1000_82571) { 306 adapter->have_msi = !pci_enable_msi(adapter->pdev); 307 if (adapter->have_msi) { 308 handler = &e1000_intr_msi; 309 irq_flags = 0; 310 } 311 } 312 313 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 314 netdev); 315 if (err) { 316 if (adapter->have_msi) 317 pci_disable_msi(adapter->pdev); 318 DPRINTK(PROBE, ERR, 319 "Unable to allocate interrupt Error: %d\n", err); 320 } 321 322 return err; 323} 324 325static void e1000_free_irq(struct e1000_adapter *adapter) 326{ 327 struct net_device *netdev = adapter->netdev; 328 329 free_irq(adapter->pdev->irq, netdev); 330 331 if (adapter->have_msi) 332 pci_disable_msi(adapter->pdev); 333} 334 335/** 336 * e1000_irq_disable - Mask off interrupt generation on the NIC 337 * @adapter: board private structure 338 **/ 339 340static void 341e1000_irq_disable(struct e1000_adapter *adapter) 342{ 343 atomic_inc(&adapter->irq_sem); 344 E1000_WRITE_REG(&adapter->hw, IMC, ~0); 345 E1000_WRITE_FLUSH(&adapter->hw); 346 synchronize_irq(adapter->pdev->irq); 347} 348 349/** 350 * e1000_irq_enable - Enable default interrupt generation settings 351 * @adapter: board private structure 352 **/ 353 354static void 355e1000_irq_enable(struct e1000_adapter *adapter) 356{ 357 if (likely(atomic_dec_and_test(&adapter->irq_sem))) { 358 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 359 E1000_WRITE_FLUSH(&adapter->hw); 360 } 361} 362 363static void 364e1000_update_mng_vlan(struct e1000_adapter *adapter) 365{ 366 struct net_device *netdev = adapter->netdev; 367 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 368 uint16_t 
old_vid = adapter->mng_vlan_id; 369 if (adapter->vlgrp) { 370 if (!vlan_group_get_device(adapter->vlgrp, vid)) { 371 if (adapter->hw.mng_cookie.status & 372 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 373 e1000_vlan_rx_add_vid(netdev, vid); 374 adapter->mng_vlan_id = vid; 375 } else 376 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 377 378 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 379 (vid != old_vid) && 380 !vlan_group_get_device(adapter->vlgrp, old_vid)) 381 e1000_vlan_rx_kill_vid(netdev, old_vid); 382 } else 383 adapter->mng_vlan_id = vid; 384 } 385} 386 387/** 388 * e1000_release_hw_control - release control of the h/w to f/w 389 * @adapter: address of board private structure 390 * 391 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 392 * For ASF and Pass Through versions of f/w this means that the 393 * driver is no longer loaded. For AMT version (only with 82573) i 394 * of the f/w this means that the network i/f is closed. 395 * 396 **/ 397 398static void 399e1000_release_hw_control(struct e1000_adapter *adapter) 400{ 401 uint32_t ctrl_ext; 402 uint32_t swsm; 403 404 /* Let firmware taken over control of h/w */ 405 switch (adapter->hw.mac_type) { 406 case e1000_82573: 407 swsm = E1000_READ_REG(&adapter->hw, SWSM); 408 E1000_WRITE_REG(&adapter->hw, SWSM, 409 swsm & ~E1000_SWSM_DRV_LOAD); 410 break; 411 case e1000_82571: 412 case e1000_82572: 413 case e1000_80003es2lan: 414 case e1000_ich8lan: 415 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 416 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 417 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 418 break; 419 default: 420 break; 421 } 422} 423 424/** 425 * e1000_get_hw_control - get control of the h/w from f/w 426 * @adapter: address of board private structure 427 * 428 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 429 * For ASF and Pass Through versions of f/w this means that 430 * the driver is loaded. 
For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 *
 **/

static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext;
	uint32_t swsm;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

/**
 * e1000_init_manageability - let management packets reach the host
 * @adapter: board private structure
 *
 * Only acts when manageability pass-through is enabled (en_mng_pt).
 **/
static void
e1000_init_manageability(struct e1000_adapter *adapter)
{
	if (adapter->en_mng_pt) {
		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		/* this will probably generate destination unreachable messages
		 * from the host OS, but the packets will be handled on SMBUS */
		if (adapter->hw.has_manc2h) {
			uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);

			manc |= E1000_MANC_EN_MNG2HOST;
/* filter management traffic on the two standard ASF/RMCP ports */
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
		}

		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}

/**
 * e1000_release_manageability - give management traffic back to firmware
 * @adapter: board private structure
 *
 * Reverses e1000_init_manageability; called on reset/teardown paths.
 **/
static void
e1000_release_manageability(struct e1000_adapter *adapter)
{
	if (adapter->en_mng_pt) {
		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		if (adapter->hw.has_manc2h)
			manc &= ~E1000_MANC_EN_MNG2HOST;

		/* don't explicitly have to mess
with MANC2H since
		 * MANC has an enable disable that gates MANC2H */

		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter = private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_multi(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
		                      E1000_DESC_UNUSED(ring));
	}

	/* remember the stack's queue length; e1000_down restores it */
	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * e1000_up - bring the interface fully up after configure/reset
 * @adapter: board private structure
 *
 * Counterpart of e1000_down; always returns 0.
 **/
int e1000_up(struct e1000_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
	netif_poll_enable(adapter->netdev);
#endif
	e1000_irq_enable(adapter);

	/* fire a link change interrupt to start the watchdog */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	uint16_t mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (adapter->hw.media_type ==
e1000_media_type_copper) { 569 /* according to the manual, the phy will retain its 570 * settings across a power-down/up cycle */ 571 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 572 mii_reg &= ~MII_CR_POWER_DOWN; 573 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); 574 } 575} 576 577static void e1000_power_down_phy(struct e1000_adapter *adapter) 578{ 579 /* Power down the PHY so no link is implied when interface is down * 580 * The PHY cannot be powered down if any of the following is TRUE * 581 * (a) WoL is enabled 582 * (b) AMT is active 583 * (c) SoL/IDER session is active */ 584 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 585 adapter->hw.media_type == e1000_media_type_copper) { 586 uint16_t mii_reg = 0; 587 588 switch (adapter->hw.mac_type) { 589 case e1000_82540: 590 case e1000_82545: 591 case e1000_82545_rev_3: 592 case e1000_82546: 593 case e1000_82546_rev_3: 594 case e1000_82541: 595 case e1000_82541_rev_2: 596 case e1000_82547: 597 case e1000_82547_rev_2: 598 if (E1000_READ_REG(&adapter->hw, MANC) & 599 E1000_MANC_SMBUS_EN) 600 goto out; 601 break; 602 case e1000_82571: 603 case e1000_82572: 604 case e1000_82573: 605 case e1000_80003es2lan: 606 case e1000_ich8lan: 607 if (e1000_check_mng_mode(&adapter->hw) || 608 e1000_check_phy_reset_block(&adapter->hw)) 609 goto out; 610 break; 611 default: 612 goto out; 613 } 614 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 615 mii_reg |= MII_CR_POWER_DOWN; 616 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); 617 mdelay(1); 618 } 619out: 620 return; 621} 622 623void 624e1000_down(struct e1000_adapter *adapter) 625{ 626 struct net_device *netdev = adapter->netdev; 627 628 /* signal that we're down so the interrupt handler does not 629 * reschedule our watchdog timer */ 630 set_bit(__E1000_DOWN, &adapter->flags); 631 632#ifdef CONFIG_E1000_NAPI 633 netif_poll_disable(netdev); 634#endif 635 e1000_irq_disable(adapter); 636 637 del_timer_sync(&adapter->tx_fifo_stall_timer); 638 
del_timer_sync(&adapter->watchdog_timer); 639 del_timer_sync(&adapter->phy_info_timer); 640 641 netdev->tx_queue_len = adapter->tx_queue_len; 642 adapter->link_speed = 0; 643 adapter->link_duplex = 0; 644 netif_carrier_off(netdev); 645 netif_stop_queue(netdev); 646 647 e1000_reset(adapter); 648 e1000_clean_all_tx_rings(adapter); 649 e1000_clean_all_rx_rings(adapter); 650} 651 652void 653e1000_reinit_locked(struct e1000_adapter *adapter) 654{ 655 WARN_ON(in_interrupt()); 656 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 657 msleep(1); 658 e1000_down(adapter); 659 e1000_up(adapter); 660 clear_bit(__E1000_RESETTING, &adapter->flags); 661} 662 663void 664e1000_reset(struct e1000_adapter *adapter) 665{ 666 uint32_t pba = 0, tx_space, min_tx_space, min_rx_space; 667 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 668 boolean_t legacy_pba_adjust = FALSE; 669 670 /* Repartition Pba for greater than 9k mtu 671 * To take effect CTRL.RST is required. 672 */ 673 674 switch (adapter->hw.mac_type) { 675 case e1000_82542_rev2_0: 676 case e1000_82542_rev2_1: 677 case e1000_82543: 678 case e1000_82544: 679 case e1000_82540: 680 case e1000_82541: 681 case e1000_82541_rev_2: 682 legacy_pba_adjust = TRUE; 683 pba = E1000_PBA_48K; 684 break; 685 case e1000_82545: 686 case e1000_82545_rev_3: 687 case e1000_82546: 688 case e1000_82546_rev_3: 689 pba = E1000_PBA_48K; 690 break; 691 case e1000_82547: 692 case e1000_82547_rev_2: 693 legacy_pba_adjust = TRUE; 694 pba = E1000_PBA_30K; 695 break; 696 case e1000_82571: 697 case e1000_82572: 698 case e1000_80003es2lan: 699 pba = E1000_PBA_38K; 700 break; 701 case e1000_82573: 702 pba = E1000_PBA_20K; 703 break; 704 case e1000_ich8lan: 705 pba = E1000_PBA_8K; 706 case e1000_undefined: 707 case e1000_num_macs: 708 break; 709 } 710 711 if (legacy_pba_adjust == TRUE) { 712 if (adapter->netdev->mtu > E1000_RXBUFFER_8192) 713 pba -= 8; /* allocate more FIFO for Tx */ 714 715 if (adapter->hw.mac_type == e1000_82547) { 716 
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
		/* adjust PBA for jumbo frames */
		E1000_WRITE_REG(&adapter->hw, PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = E1000_READ_REG(&adapter->hw, PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* don't include ethernet FCS because hardware appends/strips */
		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
			       VLAN_TAG_SIZE;
		min_tx_space = min_rx_space;
		min_tx_space *= 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (adapter->hw.mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space) {
				switch (adapter->hw.mac_type) {
				case e1000_82573:
					/* ERT enabled in e1000_configure_rx */
					break;
				default:
					pba = min_rx_space;
					break;
				}
			}
		}
	}

	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* flow control settings */
	/* Set the FC high water mark to 90% of the FIFO size.
	 * Required to clear last 3 LSB */
	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
	/* We can't use 90% on small FIFOs because the remainder
	 * would be less than 1 full frame.  In this case, we size
	 * it to allow at least a full frame above the high water
	 *  mark. */
	if (pba < E1000_PBA_16K)
		fc_high_water_mark = (pba * 1024) - 1600;

	adapter->hw.fc_high_water = fc_high_water_mark;
	adapter->hw.fc_low_water = fc_high_water_mark - 8;
	if (adapter->hw.mac_type == e1000_80003es2lan)
		adapter->hw.fc_pause_time = 0xFFFF;
	else
		adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
	adapter->hw.fc_send_xon = 1;
	adapter->hw.fc = adapter->hw.original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac_type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, WUC, 0);

	if (e1000_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (adapter->hw.mac_type >= e1000_82544 &&
	    adapter->hw.mac_type <= e1000_82547_rev_2 &&
	    adapter->hw.autoneg == 1 &&
	    adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
		uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(&adapter->hw);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);

	if (!adapter->smart_power_down &&
	    (adapter->hw.mac_type == e1000_82571 ||
	     adapter->hw.mac_type == e1000_82572)) {
		uint16_t phy_data = 0;
		/* speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed */
		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
		                   &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
		                    phy_data);
	}

	e1000_release_manageability(adapter);
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
e1000_probe(struct pci_dev *pdev,
            const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	unsigned long mmio_start, mmio_len;
	unsigned long flash_start, flash_len;

	/* NOTE(review): function-local statics assume probes are serialized
	 * by the PCI core (true for this kernel era) — verify if backporting */
	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	uint16_t eeprom_data = 0;
	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;

	if ((err = pci_enable_device(pdev)))
		return err;

	/* Prefer 64-bit DMA addressing; fall back to 32-bit.  Both the
	 * streaming and the consistent mask must succeed for DAC use. */
	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			E1000_ERR("No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	if ((err = pci_request_regions(pdev, e1000_driver_name)))
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = (1 << debug) - 1;

	/* map the register BAR (BAR_0) */
	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	/* find the first I/O-port BAR, if any, for legacy register access */
	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	/* hook up the net_device operations */
	netdev->open = &e1000_open;
	netdev->stop = &e1000_close;
	netdev->hard_start_xmit = &e1000_xmit_frame;
	netdev->get_stats = &e1000_get_stats;
	netdev->set_multicast_list = &e1000_set_multi;
	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
	e1000_set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
	netdev->poll = &e1000_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e1000_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;

	/* setup the private structure */

	if ((err = e1000_sw_init(adapter)))
		goto err_sw_init;

	err = -EIO;
	/* Flash BAR mapping must happen after e1000_sw_init
	 * because it depends on mac_type */
	if ((adapter->hw.mac_type == e1000_ich8lan) &&
	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	if (e1000_check_phy_reset_block(&adapter->hw))
		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

	/* offload features: 82543 and newer support SG/checksum/VLAN accel;
	 * ICH8 has no VLAN filter table */
	if (adapter->hw.mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
		if (adapter->hw.mac_type == e1000_ich8lan)
			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
	}

	/* TSO on 82544+ except 82547 */
	if ((adapter->hw.mac_type >= e1000_82544) &&
	   (adapter->hw.mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

	if (adapter->hw.mac_type > e1000_82547_rev_2)
		netdev->features |= NETIF_F_TSO6;
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |= NETIF_F_LLTX;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

	/* initialize eeprom parameters */

	if (e1000_init_eeprom_params(&adapter->hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(&adapter->hw);

	/* make sure the EEPROM is good */

	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		goto err_eeprom;
	}

	/* copy the MAC address out of the EEPROM */

	if (e1000_read_mac_addr(&adapter->hw))
		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		goto err_eeprom;
	}

	e1000_get_bus_info(&adapter->hw);

	/* timers armed later; here we only bind handlers and context */
	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_ich8lan:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_80003es2lan:
		/* dual-port parts read port B's control word when this
		 * function is the second port */
		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(&adapter->hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = 1;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* print bus type/speed/width info */
	{
		struct e1000_hw *hw = &adapter->hw;
		DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
			((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
			 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
			((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
			 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
			 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
			 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
			 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
			((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
			 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
			 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
			 "32-bit"));
	}

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	/* tell the stack to leave us alone until e1000_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
#ifdef CONFIG_E1000_NAPI
	netif_poll_disable(netdev);
#endif

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev)))
		goto err_register;

	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

	/* unwind in strict reverse order of acquisition */
err_register:
	e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
err_flashmap:
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif
err_sw_init:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* make sure a pending reset_task cannot run during teardown */
	cancel_work_sync(&adapter->reset_task);

	e1000_release_manageability(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
	/* drop the references taken on the polling pseudo-netdevs in
	 * e1000_sw_init() */
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	/* reset the PHY unless an active SOL/IDER session blocks it */
	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif

	/* unmap MMIO regions, then release PCI resources — the reverse
	 * of the acquisition order in e1000_probe() */
	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	/* default buffer sizes; max_frame_size derives from the current MTU */
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */

	if (e1000_set_mac_type(hw)) {
		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}

	/* these MACs require the PHY init script workaround */
	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = FALSE;
	hw->tbi_compatibility_en = TRUE;
	hw->adaptive_ifs = TRUE;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = FALSE;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	/* this driver uses a single Tx and a single Rx queue */
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	/* set up dummy netdevs for NAPI polling; each holds a reference
	 * that e1000_remove()/the probe error path must dev_put() */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->polling_netdev[i].priv = adapter;
		adapter->polling_netdev[i].poll = &e1000_clean;
		adapter->polling_netdev[i].weight = 64;
		dev_hold(&adapter->polling_netdev[i]);
		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
	}
	spin_lock_init(&adapter->tx_queue_lock);
#endif

	/* Explicitly disable IRQ since the NIC can be in any state. */
	atomic_set(&adapter->irq_sem, 0);
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 *
 * Returns E1000_SUCCESS, or -ENOMEM with nothing left allocated.
 **/

static int __devinit
e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
					  sizeof(struct net_device),
					  GFP_KERNEL);
	if (!adapter->polling_netdev) {
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		return -ENOMEM;
	}
#endif

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).
 * At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
	netif_poll_enable(netdev);
#endif

	e1000_irq_enable(adapter);

	/* fire a link status change interrupt to start the watchdog */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_release_hw_control(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_release_hw_control(adapter);

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 *
 * Returns TRUE when [start, start+len) does not straddle a 64kB
 * boundary (or when the MAC is unaffected by errata 23), FALSE
 * otherwise.
 **/
static boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
		      void *start, unsigned long len)
{
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (adapter->hw.mac_type == e1000_82545 ||
	    adapter->hw.mac_type == e1000_82546) {
		/* XOR of first and last byte differs above bit 15 iff the
		 * range crosses a 64kB boundary */
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}

	return TRUE;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int
e1000_setup_tx_resources(struct e1000_adapter *adapter,
			 struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
				     "at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure; on failure, rings that
 * were already set up are freed again before returning.
 **/

int
e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			/* unwind the rings set up so far */
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t tdlen, tctl, tipg, tarc;
	uint32_t ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDLEN, tdlen);
		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDT, 0);
		E1000_WRITE_REG(hw, TDH, 0);
		/* 82542 uses a different register layout for head/tail */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if (adapter->hw.mac_type <= e1000_82547_rev_2 &&
	    (hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	case e1000_80003es2lan:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	E1000_WRITE_REG(hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = E1000_READ_REG(hw, TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
		tarc = E1000_READ_REG(hw, TARC0);
		/* set the speed mode bit, we'll clear it if we're not at
		 * gigabit link later */
		tarc |= (1 << 21);
		E1000_WRITE_REG(hw, TARC0, tarc);
	} else if (hw->mac_type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= 1;
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= 1;
		E1000_WRITE_REG(hw, TARC1, tarc);
	}

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running on PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;

	E1000_WRITE_REG(hw, TCTL, tctl);

}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_setup_rx_resources(struct e1000_adapter *adapter,
			 struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
				GFP_KERNEL);
	if (!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}

	rxdr->ps_page_dma = kcalloc(rxdr->count,
				    sizeof(struct e1000_ps_page_dma),
				    GFP_KERNEL);
	if (!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}

	/* newer MACs use the larger packet-split descriptor format */
	if (adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
				     "at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure; on failure, rings that
 * were already set up are freed again before returning.
 **/

int
e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			/* unwind the rings set up so far */
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
/* number of pages needed to hold S bytes */
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl, rfctl;
	uint32_t psrctl = 0;
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
	uint32_t pages = 0;
#endif

	rctl = E1000_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* store bad packets only while in TBI compatibility mode */
	if (adapter->hw.tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long packet enable only for jumbo MTUs */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_256:
		rctl |= E1000_RCTL_SZ_256;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_512:
		rctl |= E1000_RCTL_SZ_512;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_1024:
		rctl |= E1000_RCTL_SZ_1024;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
	/* 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	/* allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) &&
	    PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;
#endif
	if (adapter->rx_ps_pages) {
		/* Configure extra packet-split registers */
		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable packet split support for IPv6 extension headers,
		 * because some malformed IPv6 headers can hang the RX */
		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
			  E1000_RFCTL_NEW_IPV6_EXT_DIS);

		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

		rctl |= E1000_RCTL_DTYP_PS;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		/* intentional fall-through: accumulate a BSIZE field
		 * for each enabled packet-split page */
		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
			/* fall through */
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
			/* fall through */
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
	}

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rdlen, rctl, rxcsum, ctrl_ext;

	/* pick the rx clean/refill handlers matching the descriptor
	 * format selected in e1000_setup_rctl() */
	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = adapter->rx_ring[0].count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(hw, RCTL);
	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
		/* ITR is programmed in 256ns units; itr is in ints/sec */
		if (adapter->itr_setting != 0)
			E1000_WRITE_REG(hw, ITR,
				1000000000 / (adapter->itr * 256));
	}

	if (hw->mac_type >= e1000_82571) {
		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
		/* Reset delay timers after every interrupt */
		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
		/* Auto-Mask interrupts upon ICR access */
		ctrl_ext |= E1000_CTRL_EXT_IAME;
		E1000_WRITE_REG(hw, IAM, 0xffffffff);
#endif
		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
		E1000_WRITE_FLUSH(hw);
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		E1000_WRITE_REG(hw, RDLEN, rdlen);
		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, RDT, 0);
		E1000_WRITE_REG(hw, RDH, 0);
		/* 82542 uses a different register layout for head/tail */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = E1000_READ_REG(hw, RXCSUM);
		if (adapter->rx_csum == TRUE) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable 82571 IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if ((hw->mac_type >= e1000_82571) &&
			    (adapter->rx_ps_pages)) {
				rxcsum |= E1000_RXCSUM_IPPCSE;
			}
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* enable early receives on 82573, only takes effect if using > 2048
	 * byte total frame size.  for example only for jumbo frames */
#define E1000_ERT_2048 0x100
	if (hw->mac_type == e1000_82573)
		E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);

	/* Enable Receives */
	E1000_WRITE_REG(hw, RCTL, rctl);
}

/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/

static void
e1000_free_tx_resources(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	/* unmap and free any buffers still on the ring first */
	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all
 transmit software resources
 **/

void
e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/* Release the DMA mapping and skb (if any) held by one Tx buffer slot.
 * Safe to call on a slot that was never filled. */
static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                 struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
		               buffer_info->dma,
		               buffer_info->length,
		               PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter,
                    struct e1000_tx_ring *tx_ring)
{
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = 0;

	/* reset the hardware head/tail pointers to match the empty ring
	 * (caller is expected to have transmits disabled) */
	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/

static void
e1000_free_rx_resources(struct e1000_adapter *adapter,
                        struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	/* unmap/free all receive buffers before releasing the ring itself */
	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	kfree(rx_ring->ps_page);
	rx_ring->ps_page = NULL;
	kfree(rx_ring->ps_page_dma);
	rx_ring->ps_page_dma = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/

static void
e1000_clean_rx_ring(struct e1000_adapter *adapter,
                    struct e1000_rx_ring *rx_ring)
{
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->skb) {
			pci_unmap_single(pdev,
			                 buffer_info->dma,
			                 buffer_info->length,
			                 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		/* also release any packet-split pages attached to this slot;
		 * a NULL page marks the end of the used entries */
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
		for (j = 0; j < adapter->rx_ps_pages; j++) {
			if (!ps_page->ps_page[j]) break;
			pci_unmap_page(pdev,
			               ps_page_dma->ps_page_dma[j],
			               PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ps_page_dma->ps_page_dma[j] = 0;
			put_page(ps_page->ps_page[j]);
			ps_page->ps_page[j] = NULL;
		}
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page) * rx_ring->count;
	memset(rx_ring->ps_page, 0, size);
	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
	memset(rx_ring->ps_page_dma, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* reset hardware head/tail to match the now-empty ring */
	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
}

/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	e1000_pci_clear_mwi(&adapter->hw);

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl |= E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	/* give the hardware time to complete the receive-unit reset */
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}

/* Undo e1000_enter_82542_rst(): take the receive unit out of reset,
 * restore MWI if the PCI command word had it enabled, and re-arm the
 * receive path if the interface is up. */
static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl &= ~E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	/* give the hardware time to come out of reset */
	mdelay(5);

	if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(&adapter->hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}

/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

	/* With 82571 controllers, LAA may be overwritten (with the default)
	 * due to controller reset from the other port.
	 */
	if (adapter->hw.mac_type == e1000_82571) {
		adapter->hw.laa_is_present = 1;

		/* Hold a copy of the LAA in RAR[14] This is done so that
		 * between the time RAR[0] gets clobbered  and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped. Eventaully the LAA will be in RAR[0] and
		 * RAR[14] */
		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
		              E1000_RAR_ENTRIES - 1);
	}

	if (adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}

/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	uint32_t hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	/* ICH8 parts have a smaller multicast table array */
	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
				E1000_NUM_MTA_REGISTERS_ICH8LAN :
				E1000_NUM_MTA_REGISTERS;

	if (adapter->hw.mac_type == e1000_ich8lan)
		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;

	/* reserve RAR[14] for LAA over-write work-around (see e1000_set_mac) */
	if (adapter->hw.mac_type == e1000_82571)
		rar_entries--;

	/* Check for Promiscuous and All Multicast modes */

	rctl = E1000_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= E1000_RCTL_MPE;
		rctl &= ~E1000_RCTL_UPE;
	} else {
		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
	}

	E1000_WRITE_REG(hw, RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 multicast address into the exact filters 1-14
	 * RAR 0 is used for the station MAC adddress
	 * if there are not 14 addresses, go ahead and clear the filters
	 * -- with 82571 controllers only 0-13 entries are filled here
	 */
	mc_ptr = netdev->mc_list;

	for (i = 1; i < rar_entries; i++) {
		if (mc_ptr) {
			e1000_rar_set(hw, mc_ptr->dmi_addr, i);
			mc_ptr = mc_ptr->next;
		} else {
			/* each RAR is a 64-bit pair of 32-bit registers,
			 * hence the i << 1 indexing; clear both halves */
			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
			E1000_WRITE_FLUSH(hw);
		}
	}

	/* clear the old settings from the multicast hash table */

	for (i = 0; i < mta_reg_count; i++) {
		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
		E1000_WRITE_FLUSH(hw);
	}

	/* load any remaining addresses into the hash table */

	for (; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
		e1000_mta_set(hw, hash_value);
	}

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);
}

/* Need to wait a few seconds
 after link up to get diagnostic information from
 * the phy */

static void
e1000_update_phy_info(unsigned long data)
{
	/* timer callback: data is the adapter, cast per kernel timer ABI */
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}

/**
 * e1000_82547_tx_fifo_stall - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/

static void
e1000_82547_tx_fifo_stall(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	uint32_t tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* only safe to reset the FIFO pointers once the transmit
		 * FIFO has fully drained: descriptor ring empty (TDT==TDH)
		 * and FIFO head/tail/saved pointers all caught up */
		if ((E1000_READ_REG(&adapter->hw, TDT) ==
		    E1000_READ_REG(&adapter->hw, TDH)) &&
		   (E1000_READ_REG(&adapter->hw, TDFT) ==
		    E1000_READ_REG(&adapter->hw, TDFH)) &&
		   (E1000_READ_REG(&adapter->hw, TDFTS) ==
		    E1000_READ_REG(&adapter->hw, TDFHS))) {
			/* disable transmits while rewriting the FIFO
			 * pointers, then restore TCTL -- order matters */
			tctl = E1000_READ_REG(&adapter->hw, TCTL);
			E1000_WRITE_REG(&adapter->hw, TCTL,
					tctl & ~E1000_TCTL_EN);
			E1000_WRITE_REG(&adapter->hw, TDFT,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFH,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFTS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFHS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
			E1000_WRITE_FLUSH(&adapter->hw);

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else {
			/* FIFO not drained yet -- poll again next jiffy */
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
		}
	}
}

/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void
e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	/* NOTE(review): only queue 0 is examined for hung-Tx detection below;
	 * presumably fine while the driver uses a single Tx queue */
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	uint32_t link, tctl;
	int32_t ret_val;

	ret_val = e1000_check_for_link(&adapter->hw);
	if ((ret_val == E1000_ERR_PHY) &&
	    (adapter->hw.phy_type == e1000_phy_igp_3) &&
	    (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
		/* See e1000_kumeran_lock_loss_workaround() */
		DPRINTK(LINK, INFO,
			"Gigabit has been disabled, downgrading speed\n");
	}

	if (adapter->hw.mac_type == e1000_82573) {
		e1000_enable_tx_pkt_filtering(&adapter->hw);
		if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
			e1000_update_mng_vlan(adapter);
	}

	/* for forced-link serdes (autoneg disabled in TXCW) the STATUS.LU
	 * bit is not reliable; use the cached serdes link state instead */
	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
		link = !adapter->hw.serdes_link_down;
	else
		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			uint32_t ctrl;
			boolean_t txb2b = 1;
			e1000_get_speed_and_duplex(&adapter->hw,
			                           &adapter->link_speed,
			                           &adapter->link_duplex);

			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
			        "Flow Control: %s\n",
			        adapter->link_speed,
			        adapter->link_duplex == FULL_DUPLEX ?
			        "Full Duplex" : "Half Duplex",
			        ((ctrl & E1000_CTRL_TFCE) && (ctrl &
			        E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
			        E1000_CTRL_TFCE) ? "TX" : "None" )));

			/* tweak tx_queue_len according to speed/duplex
			 * and adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = 0;
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 8;
				break;
			case SPEED_100:
				txb2b = 0;
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ?
				 */
				break;
			}

			/* at sub-gigabit speed, clear TARC0 bit 21 on
			 * 82571/82572 (hardware work-around) */
			if ((adapter->hw.mac_type == e1000_82571 ||
			     adapter->hw.mac_type == e1000_82572) &&
			    txb2b == 0) {
				uint32_t tarc0;
				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
				tarc0 &= ~(1 << 21);
				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
			}

			/* disable TSO for pcie and 10/100 speeds, to avoid
			 * some hardware issues */
			if (!adapter->tso_force &&
			    adapter->hw.bus_type == e1000_bus_type_pci_express){
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					DPRINTK(PROBE,INFO,
					        "10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/* enable transmits in the hardware, need to do this
			 * after setting TARC0 */
			tctl = E1000_READ_REG(&adapter->hw, TCTL);
			tctl |= E1000_TCTL_EN;
			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
			adapter->smartspeed = 0;
		} else {
			/* make sure the receive unit is started */
			if (adapter->hw.rx_needs_kicking) {
				struct e1000_hw *hw = &adapter->hw;
				uint32_t rctl = E1000_READ_REG(hw, RCTL);
				E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
			}
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));

			if (adapter->hw.mac_type == e1000_80003es2lan)
				/* reset device */
				schedule_work(&adapter->reset_task);
		}

		e1000_smartspeed(adapter);
	}

	e1000_update_stats(adapter);

	/* compute per-interval deltas used by the adaptive IFS logic */
	adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = TRUE;

	/* With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port. Set the appropriate LAA in RAR[0] */
	if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
}

/* interrupt-moderation states used by e1000_update_itr/e1000_set_itr */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.
 The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
                                     uint16_t itr_setting,
                                     int packets,
                                     int bytes)
{
	unsigned int retval = itr_setting;
	struct e1000_hw *hw = &adapter->hw;

	/* pre-82540 parts have no programmable ITR register */
	if (unlikely(hw->mac_type < e1000_82540))
		goto update_itr_done;

	/* no traffic this interval -- keep the current setting */
	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* jumbo frames get bulk treatment*/
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* jumbo frames need bulk latency setting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000)
			retval = bulk_latency;
		else if (packets <= 2 && bytes < 512)
			retval = lowest_latency;
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

/* Recompute and program the ITR register from the per-interval Tx/Rx
 * traffic statistics gathered since the last interrupt. */
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	uint16_t current_itr;
	uint32_t new_itr = adapter->itr;

	if (unlikely(hw->mac_type < e1000_82540))
		return;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (unlikely(adapter->link_speed != SPEED_1000)) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
	                            adapter->tx_itr,
	                            adapter->total_tx_packets,
	                            adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
	                            adapter->rx_itr,
	                            adapter->total_rx_packets,
	                            adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	/* favour the more latency-sensitive of the two directions */
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
		          min(adapter->itr + (new_itr >> 2), new_itr) :
		          new_itr;
		adapter->itr = new_itr;
		/* ITR register is in 256ns units: 10^9 / (ints-per-sec * 256) */
		E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
	}

	return;
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16

/* Set up a TSO context descriptor for @skb if it is a GSO packet.
 * Returns TRUE if a context descriptor was queued, FALSE if the skb
 * is not GSO, or a negative errno if un-cloning the header failed. */
static int
e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
          struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	uint32_t cmd_length = 0;
	uint16_t ipcse = 0, tucse, mss;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		/* the headers are modified below, so make sure we own them */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			/* zero length/checksum fields; hardware fills them
			 * in per segment, seeded with the pseudo-header sum */
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				                 &ipv6_hdr(skb)->daddr,
				                 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
		               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (++i == tx_ring->count) i = 0;
		tx_ring->next_to_use = i;

		return TRUE;
	}
	return FALSE;
}

/* Set up a checksum-offload context descriptor for @skb.
 * Returns TRUE if one was queued, FALSE if no offload is needed. */
static boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
              struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	uint8_t css;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		css = skb_transport_offset(skb);

		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

		context_desc->lower_setup.ip_config = 0;
		context_desc->upper_setup.tcp_fields.tucss = css;
		context_desc->upper_setup.tcp_fields.tucso =
			css + skb->csum_offset;
		context_desc->upper_setup.tcp_fields.tucse = 0;
		context_desc->tcp_seg_setup.data = 0;
		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (unlikely(++i == tx_ring->count)) i = 0;
		tx_ring->next_to_use = i;

		return TRUE;
	}

	return FALSE;
}

#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)

/* DMA-map the linear data and page fragments of @skb into consecutive
 * Tx buffer_info slots starting at next_to_use.  Returns the number of
 * descriptors consumed.  The various "size -= 4" / "size = 2015" trims
 * below look like controller erratum work-arounds (premature descriptor
 * write-back and PCI-X alignment hangs) -- do not "simplify" them. */
static int
e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
             struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
             unsigned int nr_frags, unsigned int mss)
{
	struct e1000_buffer *buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;
	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* work-around: a non-TSO linear packet immediately after a
		 * TSO packet gets its last descriptor trimmed by 4 bytes */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = 0;
			size -= 4;
		}

		/* work-around for premature descriptor write-back in TSO */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* first descriptor of a packet must be < 2016 bytes on
		 * PCI-X parts */
		if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
		                (size > 2015) && count == 0))
		        size = 2015;

		/* 82544 PCI-X hang avoidance: do not end a buffer on an
		 * evenly-aligned dword boundary */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				skb->data + offset,
				size,
				PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* same TSO write-back work-around for the last frag */
			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
			/* same 82544 PCI-X alignment work-around */
			if (unlikely(adapter->pcix_82544 &&
			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
			   size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					frag->page,
					offset,
					size,
					PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			if (unlikely(++i == tx_ring->count)) i = 0;
		}
	}

	/* record the skb on the last descriptor and point the first
	 * descriptor's cleanup watch at it */
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}

/* Fill @count hardware Tx descriptors from the buffer_info slots just
 * mapped by e1000_tx_map() and hand them to the hardware by advancing
 * the tail register. */
static void
e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
               int tx_flags, int count)
{
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
		             E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	/* last descriptor of the packet also carries EOP/RS etc. */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
(Only 3114 * applicable for weak-ordered memory model archs, 3115 * such as IA-64). */ 3116 wmb(); 3117 3118 tx_ring->next_to_use = i; 3119 writel(i, adapter->hw.hw_addr + tx_ring->tdt); 3120 /* we need this if more than one processor can write to our tail 3121 * at a time, it syncronizes IO on IA64/Altix systems */ 3122 mmiowb(); 3123} 3124 3125 3126#define E1000_FIFO_HDR 0x10 3127#define E1000_82547_PAD_LEN 0x3E0 3128 3129static int 3130e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) 3131{ 3132 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3133 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR; 3134 3135 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3136 3137 if (adapter->link_duplex != HALF_DUPLEX) 3138 goto no_fifo_stall_required; 3139 3140 if (atomic_read(&adapter->tx_fifo_stall)) 3141 return 1; 3142 3143 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3144 atomic_set(&adapter->tx_fifo_stall, 1); 3145 return 1; 3146 } 3147 3148no_fifo_stall_required: 3149 adapter->tx_fifo_head += skb_fifo_len; 3150 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3151 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3152 return 0; 3153} 3154 3155#define MINIMUM_DHCP_PACKET_SIZE 282 3156static int 3157e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) 3158{ 3159 struct e1000_hw *hw = &adapter->hw; 3160 uint16_t length, offset; 3161 if (vlan_tx_tag_present(skb)) { 3162 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 3163 ( adapter->hw.mng_cookie.status & 3164 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 3165 return 0; 3166 } 3167 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { 3168 struct ethhdr *eth = (struct ethhdr *) skb->data; 3169 if ((htons(ETH_P_IP) == eth->h_proto)) { 3170 const struct iphdr *ip = 3171 (struct iphdr *)((uint8_t *)skb->data+14); 3172 if (IPPROTO_UDP == ip->protocol) { 3173 struct udphdr *udp = 3174 (struct udphdr *)((uint8_t *)ip + 3175 
(ip->ihl << 2)); 3176 if (ntohs(udp->dest) == 67) { 3177 offset = (uint8_t *)udp + 8 - skb->data; 3178 length = skb->len - offset; 3179 3180 return e1000_mng_write_dhcp_info(hw, 3181 (uint8_t *)udp + 8, 3182 length); 3183 } 3184 } 3185 } 3186 } 3187 return 0; 3188} 3189 3190static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3191{ 3192 struct e1000_adapter *adapter = netdev_priv(netdev); 3193 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3194 3195 netif_stop_queue(netdev); 3196 /* Herbert's original patch had: 3197 * smp_mb__after_netif_stop_queue(); 3198 * but since that doesn't exist yet, just open code it. */ 3199 smp_mb(); 3200 3201 /* We need to check again in a case another CPU has just 3202 * made room available. */ 3203 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3204 return -EBUSY; 3205 3206 /* A reprieve! */ 3207 netif_start_queue(netdev); 3208 ++adapter->restart_queue; 3209 return 0; 3210} 3211 3212static int e1000_maybe_stop_tx(struct net_device *netdev, 3213 struct e1000_tx_ring *tx_ring, int size) 3214{ 3215 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3216 return 0; 3217 return __e1000_maybe_stop_tx(netdev, size); 3218} 3219 3220#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 3221static int 3222e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3223{ 3224 struct e1000_adapter *adapter = netdev_priv(netdev); 3225 struct e1000_tx_ring *tx_ring; 3226 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3227 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3228 unsigned int tx_flags = 0; 3229 unsigned int len = skb->len; 3230 unsigned long flags; 3231 unsigned int nr_frags = 0; 3232 unsigned int mss = 0; 3233 int count = 0; 3234 int tso; 3235 unsigned int f; 3236 len -= skb->data_len; 3237 3238 /* This goes back to the question of how to logically map a tx queue 3239 * to a flow. Right now, performance is impacted slightly negatively 3240 * if using multiple tx queues. 
If the stack breaks away from a 3241 * single qdisc implementation, we can look at this again. */ 3242 tx_ring = adapter->tx_ring; 3243 3244 if (unlikely(skb->len <= 0)) { 3245 dev_kfree_skb_any(skb); 3246 return NETDEV_TX_OK; 3247 } 3248 3249 if (adapter->hw.mac_type >= e1000_82571) 3250 max_per_txd = 8192; 3251 3252 mss = skb_shinfo(skb)->gso_size; 3253 /* The controller does a simple calculation to 3254 * make sure there is enough room in the FIFO before 3255 * initiating the DMA for each buffer. The calc is: 3256 * 4 = ceil(buffer len/mss). To make sure we don't 3257 * overrun the FIFO, adjust the max buffer len if mss 3258 * drops. */ 3259 if (mss) { 3260 uint8_t hdr_len; 3261 max_per_txd = min(mss << 2, max_per_txd); 3262 max_txd_pwr = fls(max_per_txd) - 1; 3263 3264 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3265 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { 3266 switch (adapter->hw.mac_type) { 3267 unsigned int pull_size; 3268 case e1000_82544: 3269 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 3270 break; 3271 /* fall through */ 3272 case e1000_82571: 3273 case e1000_82572: 3274 case e1000_82573: 3275 case e1000_ich8lan: 3276 pull_size = min((unsigned int)4, skb->data_len); 3277 if (!__pskb_pull_tail(skb, pull_size)) { 3278 DPRINTK(DRV, ERR, 3279 "__pskb_pull_tail failed.\n"); 3280 dev_kfree_skb_any(skb); 3281 return NETDEV_TX_OK; 3282 } 3283 len = skb->len - skb->data_len; 3284 break; 3285 default: 3286 /* do nothing */ 3287 break; 3288 } 3289 } 3290 } 3291 3292 /* reserve a descriptor for the offload context */ 3293 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3294 count++; 3295 count++; 3296 3297 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3298 count++; 3299 3300 count += TXD_USE_COUNT(len, max_txd_pwr); 3301 3302 if (adapter->pcix_82544) 3303 count++; 3304 3305 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 3306 (len > 2015))) 3307 count++; 3308 3309 nr_frags = 
skb_shinfo(skb)->nr_frags; 3310 for (f = 0; f < nr_frags; f++) 3311 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 3312 max_txd_pwr); 3313 if (adapter->pcix_82544) 3314 count += nr_frags; 3315 3316 3317 if (adapter->hw.tx_pkt_filtering && 3318 (adapter->hw.mac_type == e1000_82573)) 3319 e1000_transfer_dhcp_info(adapter, skb); 3320 3321 if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) 3322 /* Collision - tell upper layer to requeue */ 3323 return NETDEV_TX_LOCKED; 3324 3325 /* need: count + 2 desc gap to keep tail from touching 3326 * head, otherwise try next time */ 3327 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) { 3328 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 3329 return NETDEV_TX_BUSY; 3330 } 3331 3332 if (unlikely(adapter->hw.mac_type == e1000_82547)) { 3333 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 3334 netif_stop_queue(netdev); 3335 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 3336 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 3337 return NETDEV_TX_BUSY; 3338 } 3339 } 3340 3341 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 3342 tx_flags |= E1000_TX_FLAGS_VLAN; 3343 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3344 } 3345 3346 first = tx_ring->next_to_use; 3347 3348 tso = e1000_tso(adapter, tx_ring, skb); 3349 if (tso < 0) { 3350 dev_kfree_skb_any(skb); 3351 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 3352 return NETDEV_TX_OK; 3353 } 3354 3355 if (likely(tso)) { 3356 tx_ring->last_tx_tso = 1; 3357 tx_flags |= E1000_TX_FLAGS_TSO; 3358 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3359 tx_flags |= E1000_TX_FLAGS_CSUM; 3360 3361 /* Old method was to assume IPv4 packet by default if TSO was enabled. 3362 * 82571 hardware supports TSO capabilities for IPv6 as well... 3363 * no longer assume, we must. 
*/ 3364 if (likely(skb->protocol == htons(ETH_P_IP))) 3365 tx_flags |= E1000_TX_FLAGS_IPV4; 3366 3367 e1000_tx_queue(adapter, tx_ring, tx_flags, 3368 e1000_tx_map(adapter, tx_ring, skb, first, 3369 max_per_txd, nr_frags, mss)); 3370 3371 netdev->trans_start = jiffies; 3372 3373 /* Make sure there is space in the ring for the next send. */ 3374 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3375 3376 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 3377 return NETDEV_TX_OK; 3378} 3379 3380/** 3381 * e1000_tx_timeout - Respond to a Tx Hang 3382 * @netdev: network interface device structure 3383 **/ 3384 3385static void 3386e1000_tx_timeout(struct net_device *netdev) 3387{ 3388 struct e1000_adapter *adapter = netdev_priv(netdev); 3389 3390 /* Do the reset outside of interrupt context */ 3391 adapter->tx_timeout_count++; 3392 schedule_work(&adapter->reset_task); 3393} 3394 3395static void 3396e1000_reset_task(struct work_struct *work) 3397{ 3398 struct e1000_adapter *adapter = 3399 container_of(work, struct e1000_adapter, reset_task); 3400 3401 e1000_reinit_locked(adapter); 3402} 3403 3404/** 3405 * e1000_get_stats - Get System Network Statistics 3406 * @netdev: network interface device structure 3407 * 3408 * Returns the address of the device statistics structure. 3409 * The statistics are actually updated from the timer callback. 
 **/

static struct net_device_stats *
e1000_get_stats(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	/* max_frame includes L2 header and FCS on top of the MTU */
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	uint16_t eeprom_data = 0;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (adapter->hw.mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
	case e1000_ich8lan:
		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	case e1000_82573:
		/* Jumbo Frames not supported if:
		 * - this is not an 82573L device
		 * - ASPM is enabled in any way (0x1A bits 3:2) */
		e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
		                  &eeprom_data);
		if ((adapter->hw.device_id != E1000_DEV_ID_82573L) ||
		    (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
			if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
				DPRINTK(PROBE, ERR,
					"Jumbo Frames not supported.\n");
				return -EINVAL;
			}
			break;
		}
		/* ERT will be enabled later to enable wire speed receives */

		/* fall through to get support */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
#define MAX_STD_JUMBO_FRAME_SIZE 9234
		/* 9234-byte frame == 9216-byte MTU + 18 bytes header/FCS,
		 * hence the "9216" in the message below */
		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size
	 * i.e. RXBUFFER_2048 --> size-4096 slab */

	if (max_frame <= E1000_RXBUFFER_256)
		adapter->rx_buffer_len = E1000_RXBUFFER_256;
	else if (max_frame <= E1000_RXBUFFER_512)
		adapter->rx_buffer_len = E1000_RXBUFFER_512;
	else if (max_frame <= E1000_RXBUFFER_1024)
		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
	else if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else if (max_frame <= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
	else if (max_frame <= E1000_RXBUFFER_8192)
		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
	else if (max_frame <= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!adapter->hw.tbi_compatibility_on &&
	    ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	netdev->mtu = new_mtu;
	adapter->hw.max_frame_size = max_frame;

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);

	return 0;
}

/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void
e1000_update_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	uint16_t phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_adjust_tbi_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
	adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
	adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
	adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
	adapter->stats.roc += E1000_READ_REG(hw, ROC);

	/* ich8lan does not implement the rx packet-size-bucket counters */
	if (adapter->hw.mac_type != e1000_ich8lan) {
		adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
		adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
		adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
		adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
		adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
		adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
	}

	adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
	adapter->stats.mpc += E1000_READ_REG(hw, MPC);
	adapter->stats.scc += E1000_READ_REG(hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
	adapter->stats.mcc += E1000_READ_REG(hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
	adapter->stats.dc += E1000_READ_REG(hw, DC);
	adapter->stats.sec += E1000_READ_REG(hw, SEC);
	adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
	adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
	adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
	adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(hw, RFC);
	adapter->stats.rjc += E1000_READ_REG(hw, RJC);
	adapter->stats.torl += E1000_READ_REG(hw, TORL);
	adapter->stats.torh += E1000_READ_REG(hw, TORH);
	adapter->stats.totl += E1000_READ_REG(hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(hw, TOTH);
	adapter->stats.tpr += E1000_READ_REG(hw, TPR);

	/* ich8lan does not implement the tx packet-size-bucket counters */
	if (adapter->hw.mac_type != e1000_ich8lan) {
		adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
		adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
		adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
		adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
		adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
		adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
	}

	adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(hw, BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = E1000_READ_REG(hw, COLC);
	adapter->stats.colc += hw->collision_delta;

	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
		adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
		adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
		adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
	}
	if (hw->mac_type > e1000_82547_rev_2) {
		adapter->stats.iac += E1000_READ_REG(hw, IAC);
		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
			adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
			adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
			adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
			adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
			adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
			adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
		}
	}

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->stats.gprc;
	adapter->net_stats.tx_packets = adapter->stats.gptc;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	adapter->net_stats.tx_errors = adapter->stats.txerrc;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
	/* some hardware reports bogus carrier errors at full duplex */
	if (adapter->hw.bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		adapter->net_stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		   (hw->phy_type == e1000_phy_m88) &&
		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (adapter->hw.has_smbus) {
		adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC);
		adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC);
		adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t
e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
#ifndef CONFIG_E1000_NAPI
	int i;
#endif
	uint32_t icr = E1000_READ_REG(hw, ICR);

#ifdef CONFIG_E1000_NAPI
	/* read ICR disables interrupts using IAM, so keep up with our
	 * enable/disable accounting */
	atomic_inc(&adapter->irq_sem);
#endif
	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->get_link_status = 1;
		if (netif_carrier_ok(netdev) &&
		    (adapter->hw.mac_type == e1000_80003es2lan)) {
			/* disable receives */
			uint32_t rctl = E1000_READ_REG(hw, RCTL);
			E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

#ifdef CONFIG_E1000_NAPI
	if (likely(netif_rx_schedule_prep(netdev))) {
		/* reset per-poll byte/packet totals for interrupt
		 * throttling (e1000_set_itr) */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev);
	} else
		e1000_irq_enable(adapter);
#else
	adapter->total_tx_bytes = 0;
	adapter->total_rx_bytes = 0;
	adapter->total_tx_packets = 0;
	adapter->total_rx_packets = 0;

	for (i = 0; i < E1000_MAX_INTR; i++)
		/* single '&' (not '&&') so both cleanup calls always run;
		 * the negated operands are 0/1 so the value matches the
		 * logical AND */
		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
			break;

	if (likely(adapter->itr_setting & 3))
		e1000_set_itr(adapter);
#endif

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t
e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
	int i;
#endif
	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

#ifdef CONFIG_E1000_NAPI
	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (unlikely(hw->mac_type >= e1000_82571 &&
	             !(icr & E1000_ICR_INT_ASSERTED)))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write, but it does mean we should
	 * account for it ASAP. */
	if (likely(hw->mac_type >= e1000_82571))
		atomic_inc(&adapter->irq_sem);
#endif

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		if (netif_carrier_ok(netdev) &&
		    (adapter->hw.mac_type == e1000_80003es2lan)) {
			/* disable receives */
			rctl = E1000_READ_REG(hw, RCTL);
			E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

#ifdef CONFIG_E1000_NAPI
	if (unlikely(hw->mac_type < e1000_82571)) {
		/* disable interrupts, without the synchronize_irq bit */
		atomic_inc(&adapter->irq_sem);
		E1000_WRITE_REG(hw, IMC, ~0);
		E1000_WRITE_FLUSH(hw);
	}
	if (likely(netif_rx_schedule_prep(netdev))) {
		/* reset per-poll byte/packet totals for interrupt
		 * throttling (e1000_set_itr) */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev);
	} else
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue */
		e1000_irq_enable(adapter);
#else
	/* Writing IMC and IMS is needed for 82547.
	 * Due to Hub Link bus being occupied, an interrupt
	 * de-assertion message is not able to be sent.
	 * When an interrupt assertion message is generated later,
	 * two messages are re-ordered and sent out.
	 * That causes APIC to think 82547 is in de-assertion
	 * state, while 82547 is in assertion state, resulting
	 * in dead lock. Writing IMC forces 82547 into
	 * de-assertion state. */
	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
		atomic_inc(&adapter->irq_sem);
		E1000_WRITE_REG(hw, IMC, ~0);
	}

	adapter->total_tx_bytes = 0;
	adapter->total_rx_bytes = 0;
	adapter->total_tx_packets = 0;
	adapter->total_rx_packets = 0;

	for (i = 0; i < E1000_MAX_INTR; i++)
		/* single '&' (not '&&') so both cleanup calls always run;
		 * operands are 0/1 so the value matches the logical AND */
		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
			break;

	if (likely(adapter->itr_setting & 3))
		e1000_set_itr(adapter);

	/* re-enable the interrupt masked above for the 82547 workaround */
	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
		e1000_irq_enable(adapter);

#endif
	return IRQ_HANDLED;
}

#ifdef CONFIG_E1000_NAPI
/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/

static int
e1000_clean(struct net_device *poll_dev, int *budget)
{
	struct e1000_adapter *adapter;
	int work_to_do = min(*budget, poll_dev->quota);
	int tx_cleaned = 0, work_done = 0;

	/* Must NOT use netdev_priv macro here. */
	adapter = poll_dev->priv;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(poll_dev))
		goto quit_polling;

	/* e1000_clean is called per-cpu.  This lock protects
	 * tx_ring[0] from being cleaned by multiple cpus
	 * simultaneously.  A failure obtaining the lock means
	 * tx_ring[0] is currently being cleaned anyway. */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_cleaned = e1000_clean_tx_irq(adapter,
		                                &adapter->tx_ring[0]);
		spin_unlock(&adapter->tx_queue_lock);
	}

	adapter->clean_rx(adapter, &adapter->rx_ring[0],
	                  &work_done, work_to_do);

	*budget -= work_done;
	poll_dev->quota -= work_done;

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((!tx_cleaned && (work_done == 0)) ||
	   !netif_running(poll_dev)) {
quit_polling:
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		netif_rx_complete(poll_dev);
		e1000_irq_enable(adapter);
		return 0;	/* done: leave polling mode */
	}

	return 1;		/* more work remains */
}

#endif
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static boolean_t
e1000_clean_tx_irq(struct e1000_adapter *adapter,
                   struct e1000_tx_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
#ifdef CONFIG_E1000_NAPI
	unsigned int count = 0;
#endif
	boolean_t cleaned = FALSE;
	unsigned int total_tx_bytes=0, total_tx_packets=0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* reclaim whole packets: DD set on the end-of-packet descriptor
	 * means hardware is done with every descriptor of that packet */
	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = FALSE; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count)) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#ifdef CONFIG_E1000_NAPI
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT) break;
#endif
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
	             E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = FALSE;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
		               (adapter->tx_timeout_factor * HZ))
		    && !(E1000_READ_REG(&adapter->hw, STATUS) &
		         E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			/* NOTE(review): (tx_ring - adapter->tx_ring) is
			 * already an element count; dividing it by
			 * sizeof(struct e1000_tx_ring) again makes the
			 * reported queue number suspect -- verify */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
				" Tx Queue <%lu>\n"
				" TDH <%x>\n"
				" TDT <%x>\n"
				" next_to_use <%x>\n"
				" next_to_clean <%x>\n"
				"buffer_info[next_to_clean]\n"
				" time_stamp <%lx>\n"
				" next_to_watch <%x>\n"
				" jiffies <%lx>\n"
				" next_to_watch.status <%x>\n",
				(unsigned long)((tx_ring - adapter->tx_ring) /
					sizeof(struct e1000_tx_ring)),
				readl(adapter->hw.hw_addr + tx_ring->tdh),
				readl(adapter->hw.hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return cleaned;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/

static void
e1000_rx_checksum(struct e1000_adapter *adapter,
                  uint32_t status_err, uint32_t csum,
                  struct sk_buff *skb)
{
	uint16_t status = (uint16_t)status_err;
	uint8_t errors = (uint8_t)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* 82543 or newer only */
	if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (adapter->hw.mac_type <= e1000_82547_rev_2) {
		if (!(status & E1000_RXD_STAT_TCPCS))
			return;
	} else {
		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
			return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (adapter->hw.mac_type > e1000_82547_rev_2) {
		/* IP fragment with UDP payload */
		/* Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 **/

static boolean_t
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq(struct e1000_adapter *adapter,
                   struct e1000_rx_ring *rx_ring,
                   int *work_done, int work_to_do)
#else
e1000_clean_rx_irq(struct e1000_adapter *adapter,
                   struct e1000_rx_ring *rx_ring)
#endif
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	uint32_t length;
	uint8_t last_byte;
	unsigned int i;
	int cleaned_count = 0;
	boolean_t cleaned = FALSE;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

#ifdef CONFIG_E1000_NAPI
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
#endif
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		/* prefetch the next descriptor/buffer before the current
		 * one is handed up the stack */
		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = TRUE;
		cleaned_count++;
		pci_unmap_single(pdev,
		                 buffer_info->dma,
		                 buffer_info->length,
		                 PCI_DMA_FROMDEVICE);

		length = le16_to_cpu(rx_desc->length);

		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
			/* All receives must fit into a single buffer */
			E1000_DBG("%s: Receive packet consumed multiple"
				  " buffers\n", netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			/* TBI mode: some "errored" frames are acceptable;
			 * adjust stats and strip the trailing carrier
			 * extension byte, else drop the frame */
			last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			              rx_desc->errors, length, last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(&adapter->hw,
				                       &adapter->stats,
				                       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
				                       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		/* strip the 4-byte ethernet CRC */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
				                               -NET_IP_ALIGN,
				                               (skb->data -
				                                NET_IP_ALIGN),
				                               (length +
				                                NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
		                  (uint32_t)(status) |
		                  ((uint32_t)(rx_desc->errors) << 24),
		                  le16_to_cpu(rx_desc->csum), skb);

		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
		if (unlikely(adapter->vlgrp &&
		            (status & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
			                         le16_to_cpu(rx_desc->special) &
			                         E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_E1000_NAPI */
		if (unlikely(adapter->vlgrp &&
		            (status & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
			                le16_to_cpu(rx_desc->special) &
			                E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_E1000_NAPI */
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 **/

static boolean_t
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                      struct e1000_rx_ring *rx_ring,
                      int *work_done, int work_to_do)
#else
e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                      struct e1000_rx_ring *rx_ring)
#endif
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct sk_buff *skb;
	unsigned int i, j;
	uint32_t length, staterr;
	int cleaned_count = 0;
	boolean_t cleaned = FALSE;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
#ifdef CONFIG_E1000_NAPI
		/* honor the NAPI budget */
		if (unlikely(*work_done >= work_to_do))
			break;
		(*work_done)++;
#endif
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = TRUE;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
			E1000_DBG("%s: Packet Split buffers didn't pick up"
				  " the full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (unlikely(!length)) {
			E1000_DBG("%s: Last part of the packet spanning"
				  " multiple descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/* this looks ugly, but it seems compiler issues make it
		   more efficient than reusing j */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/* page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put*/
		if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;
			/* there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long */
			pci_dma_sync_single_for_cpu(pdev,
				ps_page_dma->ps_page_dma[0],
				PAGE_SIZE,
				PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->ps_page[0],
			                    KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			pci_dma_sync_single_for_device(pdev,
				ps_page_dma->ps_page_dma[0],
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			/* remove the CRC */
			l1 -= 4;
			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		for (j = 0; j < adapter->rx_ps_pages; j++) {
			/* a zero length means no more pages for this packet */
			if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
				break;
			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ps_page_dma->ps_page_dma[j] = 0;
			skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
					   length);
			ps_page->ps_page[j] = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive */
		pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr,
				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (likely(rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
			adapter->rx_hdr_split++;
#ifdef CONFIG_E1000_NAPI
		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
				le16_to_cpu(rx_desc->wb.middle.vlan) &
				E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_E1000_NAPI */
		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
				le16_to_cpu(rx_desc->wb.middle.vlan) &
				E1000_RXD_SPC_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_E1000_NAPI */
		netdev->last_rx = jiffies;

next_desc:
		/* clear the status byte so the descriptor is not re-cleaned */
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/

static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                       struct e1000_rx_ring *rx_ring,
                       int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse a recycled skb if the cleanup path left one behind */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct
sk_buff *oldskb = skb; 4462 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 4463 "at %p\n", bufsz, skb->data); 4464 /* Try again, without freeing the previous */ 4465 skb = netdev_alloc_skb(netdev, bufsz); 4466 /* Failed allocation, critical failure */ 4467 if (!skb) { 4468 dev_kfree_skb(oldskb); 4469 break; 4470 } 4471 4472 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4473 /* give up */ 4474 dev_kfree_skb(skb); 4475 dev_kfree_skb(oldskb); 4476 break; /* while !buffer_info->skb */ 4477 } 4478 4479 /* Use new allocation */ 4480 dev_kfree_skb(oldskb); 4481 } 4482 /* Make buffer alignment 2 beyond a 16 byte boundary 4483 * this will result in a 16 byte aligned IP header after 4484 * the 14 byte MAC header is removed 4485 */ 4486 skb_reserve(skb, NET_IP_ALIGN); 4487 4488 buffer_info->skb = skb; 4489 buffer_info->length = adapter->rx_buffer_len; 4490map_skb: 4491 buffer_info->dma = pci_map_single(pdev, 4492 skb->data, 4493 adapter->rx_buffer_len, 4494 PCI_DMA_FROMDEVICE); 4495 4496 /* Fix for errata 23, can't cross 64kB boundary */ 4497 if (!e1000_check_64k_bound(adapter, 4498 (void *)(unsigned long)buffer_info->dma, 4499 adapter->rx_buffer_len)) { 4500 DPRINTK(RX_ERR, ERR, 4501 "dma align check failed: %u bytes at %p\n", 4502 adapter->rx_buffer_len, 4503 (void *)(unsigned long)buffer_info->dma); 4504 dev_kfree_skb(skb); 4505 buffer_info->skb = NULL; 4506 4507 pci_unmap_single(pdev, buffer_info->dma, 4508 adapter->rx_buffer_len, 4509 PCI_DMA_FROMDEVICE); 4510 4511 break; /* while !buffer_info->skb */ 4512 } 4513 rx_desc = E1000_RX_DESC(*rx_ring, i); 4514 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4515 4516 if (unlikely(++i == rx_ring->count)) 4517 i = 0; 4518 buffer_info = &rx_ring->buffer_info[i]; 4519 } 4520 4521 if (likely(rx_ring->next_to_use != i)) { 4522 rx_ring->next_to_use = i; 4523 if (unlikely(i-- == 0)) 4524 i = (rx_ring->count - 1); 4525 4526 /* Force memory writes to complete before letting h/w 4527 * know there are new 
descriptors to fetch. (Only 4528 * applicable for weak-ordered memory model archs, 4529 * such as IA-64). */ 4530 wmb(); 4531 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4532 } 4533} 4534 4535/** 4536 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split 4537 * @adapter: address of board private structure 4538 **/ 4539 4540static void 4541e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 4542 struct e1000_rx_ring *rx_ring, 4543 int cleaned_count) 4544{ 4545 struct net_device *netdev = adapter->netdev; 4546 struct pci_dev *pdev = adapter->pdev; 4547 union e1000_rx_desc_packet_split *rx_desc; 4548 struct e1000_buffer *buffer_info; 4549 struct e1000_ps_page *ps_page; 4550 struct e1000_ps_page_dma *ps_page_dma; 4551 struct sk_buff *skb; 4552 unsigned int i, j; 4553 4554 i = rx_ring->next_to_use; 4555 buffer_info = &rx_ring->buffer_info[i]; 4556 ps_page = &rx_ring->ps_page[i]; 4557 ps_page_dma = &rx_ring->ps_page_dma[i]; 4558 4559 while (cleaned_count--) { 4560 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 4561 4562 for (j = 0; j < PS_PAGE_BUFFERS; j++) { 4563 if (j < adapter->rx_ps_pages) { 4564 if (likely(!ps_page->ps_page[j])) { 4565 ps_page->ps_page[j] = 4566 alloc_page(GFP_ATOMIC); 4567 if (unlikely(!ps_page->ps_page[j])) { 4568 adapter->alloc_rx_buff_failed++; 4569 goto no_buffers; 4570 } 4571 ps_page_dma->ps_page_dma[j] = 4572 pci_map_page(pdev, 4573 ps_page->ps_page[j], 4574 0, PAGE_SIZE, 4575 PCI_DMA_FROMDEVICE); 4576 } 4577 /* Refresh the desc even if buffer_addrs didn't 4578 * change because each write-back erases 4579 * this info. 
4580 */ 4581 rx_desc->read.buffer_addr[j+1] = 4582 cpu_to_le64(ps_page_dma->ps_page_dma[j]); 4583 } else 4584 rx_desc->read.buffer_addr[j+1] = ~0; 4585 } 4586 4587 skb = netdev_alloc_skb(netdev, 4588 adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4589 4590 if (unlikely(!skb)) { 4591 adapter->alloc_rx_buff_failed++; 4592 break; 4593 } 4594 4595 /* Make buffer alignment 2 beyond a 16 byte boundary 4596 * this will result in a 16 byte aligned IP header after 4597 * the 14 byte MAC header is removed 4598 */ 4599 skb_reserve(skb, NET_IP_ALIGN); 4600 4601 buffer_info->skb = skb; 4602 buffer_info->length = adapter->rx_ps_bsize0; 4603 buffer_info->dma = pci_map_single(pdev, skb->data, 4604 adapter->rx_ps_bsize0, 4605 PCI_DMA_FROMDEVICE); 4606 4607 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 4608 4609 if (unlikely(++i == rx_ring->count)) i = 0; 4610 buffer_info = &rx_ring->buffer_info[i]; 4611 ps_page = &rx_ring->ps_page[i]; 4612 ps_page_dma = &rx_ring->ps_page_dma[i]; 4613 } 4614 4615no_buffers: 4616 if (likely(rx_ring->next_to_use != i)) { 4617 rx_ring->next_to_use = i; 4618 if (unlikely(i-- == 0)) i = (rx_ring->count - 1); 4619 4620 /* Force memory writes to complete before letting h/w 4621 * know there are new descriptors to fetch. (Only 4622 * applicable for weak-ordered memory model archs, 4623 * such as IA-64). */ 4624 wmb(); 4625 /* Hardware increments by 16 bytes, but packet split 4626 * descriptors are 32 bytes...so we increment tail 4627 * twice as much. 
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
	}
}


/* e1000_smartspeed - Workaround that toggles the PHY Master/Slave setting
 * when gigabit autonegotiation repeatedly reports a Master/Slave config
 * fault; only applies to IGP PHYs advertising 1000FD. */
static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
	uint16_t phy_status;
	uint16_t phy_ctrl;

	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* drop manual Master/Slave config and renegotiate */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
			   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
				   	       &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
/**
 * e1000_ioctl - handle device ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 *
 * Dispatches the MII ioctls to e1000_mii_ioctl(); everything else is
 * unsupported.
 **/

static int
e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * e1000_mii_ioctl - read/write the PHY over the MII interface
 * @netdev: network interface device structure
 * @ifr: interface request data carrying the mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * Copper-only.  A write to PHY_CTRL additionally mirrors the requested
 * speed/duplex/autoneg settings into the driver state and resets the
 * device so they take effect.
 **/

static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	uint16_t mii_reg;
	uint16_t spddplx;
	unsigned long flags;

	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy_addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* stats_lock serializes PHY accesses with the watchdog */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				   &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* only the 32 standard MII registers are writable */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		if (adapter->hw.media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					adapter->hw.autoneg = 1;
					adapter->hw.autoneg_advertised = 0x2F;
				} else {
					/* decode the MII control register
					 * speed-select/duplex bits into a
					 * forced speed+duplex value */
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval) {
						spin_unlock_irqrestore(
							&adapter->stats_lock,
							flags);
						return retval;
					}
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(&adapter->hw)) {
					spin_unlock_irqrestore(
						&adapter->stats_lock, flags);
					return -EIO;
				}
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

/* Enable PCI Memory-Write-Invalidate for the device behind @hw */
void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}

/* Disable PCI Memory-Write-Invalidate for the device behind @hw */
void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

/* Read a 16-bit word from PCI config space at offset @reg into @value */
void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

/* Write the 16-bit word at *@value to PCI config space offset @reg */
void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

int32_t
e1000_read_pcie_cap_reg(struct
e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;
	uint16_t cap_offset;

	/* locate the PCI Express capability; fail if absent */
	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return E1000_SUCCESS;
}

/* Write @value to the I/O port @port (legacy port I/O accessor) */
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
	outl(value, port);
}

/* e1000_vlan_rx_register - enable/disable hardware VLAN tag handling as the
 * stack attaches (@grp set) or detaches (@grp NULL) a vlan_group.  ICH8
 * does not support VLAN filtering, so VFE is only touched on other MACs. */
static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* enable VLAN receive filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_VFE;
			rctl &= ~E1000_RCTL_CFIEN;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl &= ~E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* disable VLAN filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl &= ~E1000_RCTL_VFE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			if (adapter->mng_vlan_id !=
			    (uint16_t)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	e1000_irq_enable(adapter);
}

/* e1000_vlan_rx_add_vid - add @vid to the hardware VLAN filter table */
static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter =
netdev_priv(netdev);
	uint32_t vfta, index;

	/* the management VLAN is owned by firmware; leave it alone */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table: one bit per VID, 32 VIDs per entry */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

/* e1000_vlan_rx_kill_vid - remove @vid from the hardware VLAN filter table */
static void
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

/* e1000_restore_vlan - re-program VLAN offload state and the whole filter
 * table after a reset, from the vlan_group the stack gave us */
static void
e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		uint16_t vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

/* e1000_set_spd_dplx - translate a SPEED_x + DUPLEX_y sum into the
 * driver's forced speed/duplex (or autoneg for 1000FD) settings.
 * Returns 0 on success or -EINVAL for unsupported combinations. */
int
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
{
	adapter->hw.autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case
SPEED_10 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		/* gigabit is negotiated, not forced */
		adapter->hw.autoneg = 1;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

/* e1000_suspend - bring the device down and arm Wake-on-LAN filters
 * before entering a low-power PCI state; also used by e1000_shutdown() */
static int
e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, ctrl_ext, rctl, status;
	uint32_t wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	/* if we already have link, waking on link-change is pointless */
	status = E1000_READ_REG(&adapter->hw, STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_MPE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		}

		if (adapter->hw.mac_type >= e1000_82540) {
			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
		}

		if (adapter->hw.media_type == e1000_media_type_fiber ||
		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000_disable_pciex_master(&adapter->hw);

		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	e1000_release_manageability(adapter);

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (adapter->hw.phy_type == e1000_phy_igp_3)
		e1000_phy_powerdown_workaround(&adapter->hw);

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
/* e1000_resume - restore PCI state and bring the interface back up */
static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if ((err = pci_enable_device(pdev))) {
		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
		return err;

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	/* clear any pending wake-up status */
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return 0;
}
#endif

/* e1000_shutdown - reuse the suspend path to quiesce and arm WoL */
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
5159 */ 5160static void 5161e1000_netpoll(struct net_device *netdev) 5162{ 5163 struct e1000_adapter *adapter = netdev_priv(netdev); 5164 5165 disable_irq(adapter->pdev->irq); 5166 e1000_intr(adapter->pdev->irq, netdev); 5167 e1000_clean_tx_irq(adapter, adapter->tx_ring); 5168#ifndef CONFIG_E1000_NAPI 5169 adapter->clean_rx(adapter, adapter->rx_ring); 5170#endif 5171 enable_irq(adapter->pdev->irq); 5172} 5173#endif 5174 5175/** 5176 * e1000_io_error_detected - called when PCI error is detected 5177 * @pdev: Pointer to PCI device 5178 * @state: The current pci conneection state 5179 * 5180 * This function is called after a PCI bus error affecting 5181 * this device has been detected. 5182 */ 5183static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5184{ 5185 struct net_device *netdev = pci_get_drvdata(pdev); 5186 struct e1000_adapter *adapter = netdev->priv; 5187 5188 netif_device_detach(netdev); 5189 5190 if (netif_running(netdev)) 5191 e1000_down(adapter); 5192 pci_disable_device(pdev); 5193 5194 /* Request a slot slot reset. */ 5195 return PCI_ERS_RESULT_NEED_RESET; 5196} 5197 5198/** 5199 * e1000_io_slot_reset - called after the pci bus has been reset. 5200 * @pdev: Pointer to PCI device 5201 * 5202 * Restart the card from scratch, as if from a cold-boot. Implementation 5203 * resembles the first-half of the e1000_resume routine. 
5204 */ 5205static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 5206{ 5207 struct net_device *netdev = pci_get_drvdata(pdev); 5208 struct e1000_adapter *adapter = netdev->priv; 5209 5210 if (pci_enable_device(pdev)) { 5211 printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); 5212 return PCI_ERS_RESULT_DISCONNECT; 5213 } 5214 pci_set_master(pdev); 5215 5216 pci_enable_wake(pdev, PCI_D3hot, 0); 5217 pci_enable_wake(pdev, PCI_D3cold, 0); 5218 5219 e1000_reset(adapter); 5220 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 5221 5222 return PCI_ERS_RESULT_RECOVERED; 5223} 5224 5225/** 5226 * e1000_io_resume - called when traffic can start flowing again. 5227 * @pdev: Pointer to PCI device 5228 * 5229 * This callback is called when the error recovery driver tells us that 5230 * its OK to resume normal operation. Implementation resembles the 5231 * second-half of the e1000_resume routine. 5232 */ 5233static void e1000_io_resume(struct pci_dev *pdev) 5234{ 5235 struct net_device *netdev = pci_get_drvdata(pdev); 5236 struct e1000_adapter *adapter = netdev->priv; 5237 5238 e1000_init_manageability(adapter); 5239 5240 if (netif_running(netdev)) { 5241 if (e1000_up(adapter)) { 5242 printk("e1000: can't bring device back up after reset\n"); 5243 return; 5244 } 5245 } 5246 5247 netif_device_attach(netdev); 5248 5249 /* If the controller is 82573 and f/w is AMT, do not set 5250 * DRV_LOAD until the interface is up. For all other cases, 5251 * let the f/w know that the h/w is now under the control 5252 * of the driver. */ 5253 if (adapter->hw.mac_type != e1000_82573 || 5254 !e1000_check_mng_mode(&adapter->hw)) 5255 e1000_get_hw_control(adapter); 5256 5257} 5258 5259/* e1000_main.c */ 5260