/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#ifndef CONFIG_IXGB_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "1.0.126-k2"DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION;
static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgb_pci_tbl[] = {
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);

/* Local Function Prototypes */

int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);
int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
void ixgb_update_stats(struct ixgb_adapter *adapter);

static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct net_device *netdev, int *budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
				   int *work_done, int work_to_do);
#else
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
#endif
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
void ixgb_set_ethtool_ops(struct net_device *netdev);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_rx_register(struct net_device *netdev,
				  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

/* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter);

static struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = __devexit_p(ixgb_remove),
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_WTHRESH_DEFAULT 15  /* chip writes back at this many or RXT0 */
#define RXDCTL_PTHRESH_DEFAULT 0   /* chip considers prefetch below
				    * this */
#define RXDCTL_HTHRESH_DEFAULT 0   /* chip will only prefetch if tail
				    * is pushed this many descriptors
				    * from head */

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n",
	       ixgb_driver_string, ixgb_driver_version);

	printk(KERN_INFO "%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	if(atomic_dec_and_test(&adapter->irq_sem)) {
		IXGB_WRITE_REG(&adapter->hw, IMS,
			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
			       IXGB_INT_LSC);
		IXGB_WRITE_FLUSH(&adapter->hw);
	}
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter);

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = 1;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

	if((hw->max_frame_size != max_frame) ||
	   (hw->max_frame_size !=
	    (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if(hw->max_frame_size >
		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if(!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_IXGB_NAPI
	netif_poll_enable(netdev);
#endif
	ixgb_irq_enable(adapter);

	return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if(kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);
#ifdef CONFIG_IXGB_NAPI
	netif_poll_disable(netdev);
#endif
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{

	ixgb_adapter_stop(&adapter->hw);
	if(!ixgb_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
}

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev,
	   const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	unsigned long mmio_start;
	int mmio_len;
	int pci_using_dac;
	int i;
	int err;

	if((err = pci_enable_device(pdev)))
		return err;

	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	   !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
		   (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			printk(KERN_ERR
			       "ixgb: No usable DMA configuration, aborting\n");
			goto err_dma_mask;
		}
		pci_using_dac = 0;
	}

	if((err = pci_request_regions(pdev, ixgb_driver_name)))
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if(!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if(!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for(i = BAR_1; i <= BAR_5; i++) {
		if(pci_resource_len(pdev, i) == 0)
			continue;
		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->open = &ixgb_open;
	netdev->stop = &ixgb_close;
	netdev->hard_start_xmit = &ixgb_xmit_frame;
	netdev->get_stats = &ixgb_get_stats;
	netdev->set_multicast_list = &ixgb_set_multi;
	netdev->set_mac_address = &ixgb_set_mac;
	netdev->change_mtu = &ixgb_change_mtu;
	ixgb_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgb_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
	netdev->poll = &ixgb_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = ixgb_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgb_netpoll;
#endif

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	if((err = ixgb_sw_init(adapter)))
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
	netdev->features |= NETIF_F_TSO;
#ifdef NETIF_F_LLTX
	netdev->features |= NETIF_F_LLTX;
#endif

	if(pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */

	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev)))
		goto err_register;

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size;

	if((hw->device_id == IXGB_DEVICE_ID_82597EX)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		DPRINTK(PROBE, ERR, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->tx_lock);

	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */

	if((err = ixgb_setup_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */

	if((err = ixgb_setup_rx_resources(adapter)))
		goto err_setup_rx;

	if((err = ixgb_up(adapter)))
		goto err_up;

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, TRUE);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if(!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate transmit descriptor ring memory\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if(!txdr->desc) {
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate transmit descriptor memory\n");
		return -ENOMEM;
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	uint64_t tdba = adapter->tx_ring.dma;
	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	uint32_t tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE
		| (adapter->tx_int_delay_enable ?
		   IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if(!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if(!rxdr->desc) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate receive descriptors\n");
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	uint32_t rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;

	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t rxdctl;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* set up pre-fetching of receive buffers so we get some before we
	 * run out (default hardware behavior is to run out before fetching
	 * more). This sets up to fetch if HTHRESH rx descriptors are avail
	 * and the descriptors in hw cache are below PTHRESH.
	 * This avoids the hardware behavior of fetching <=512 descriptors
	 * in a single burst that pre-empts all other activity, usually
	 * causing fifo overflows. */
	/* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
	rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
		 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
		 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if(adapter->rx_csum == TRUE) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	pci_free_consistent(pdev, adapter->tx_ring.size,
			    adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	struct pci_dev *pdev = adapter->pdev;

	if (buffer_info->dma)
		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
			       PCI_DMA_TODEVICE);

	if (buffer_info->skb)
		dev_kfree_skb_any(buffer_info->skb);

	buffer_info->skb = NULL;
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for(i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for(i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if(buffer_info->skb) {

			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);

			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if(!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if(netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if(netdev->flags & IFF_ALLMULTI) {
		rctl |= IXGB_RCTL_MPE;
		rctl &= ~IXGB_RCTL_UPE;
	} else {
		rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}

	if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];

		IXGB_WRITE_REG(hw, RCTL, rctl);

		for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
		    i++, mc_ptr = mc_ptr->next)
			memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
			       mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

		ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
	}
}

/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if(adapter->hw.link_up) {
		if(!netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO,
				"NIC Link is Up 10000 Mbps Full Duplex\n");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if(netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);

		}
	}

	ixgb_update_stats(adapter);

	if(!netif_carrier_ok(netdev)) {
		if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->tx_timeout_task);
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = TRUE;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM	0x00000001
#define IXGB_TX_FLAGS_VLAN	0x00000002
#define IXGB_TX_FLAGS_TSO	0x00000004

static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	uint16_t ipcse, tucse, mss;
	int err;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;

		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));


		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}

static boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t css, cso;

	if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_transport_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(uint32_t *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;

	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while(len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if(++i == tx_ring->count) i = 0;
	}

	for(f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while(len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);

			if (unlikely(mss && !nr_frags && size == len
				     && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     frag->page_offset + offset,
					     size,
					     PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
			if(++i == tx_ring->count) i = 0;
		}
	}
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	uint32_t cmd_type_len = adapter->tx_cmd_type;
	uint8_t status = 0;
	uint8_t popts = 0;
	unsigned int i;

	if(tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if(tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if(tx_flags & IXGB_TX_FLAGS_VLAN) {
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}

	i = tx_ring->next_to_use;

	while(count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if(++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
					     | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}


/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1

static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	unsigned long flags;
	int vlan_id = 0;
	int tso;

	if(skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return 0;
	}

#ifdef NETIF_F_LLTX
	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
#else
	spin_lock_irqsave(&adapter->tx_lock, flags);
#endif

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
					DESC_NEEDED))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

#ifndef NETIF_F_LLTX
	spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

	if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
#ifdef NETIF_F_LLTX
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if(ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
		      tx_flags);

	netdev->trans_start = jiffies;

#ifdef NETIF_F_LLTX
	/* Make sure there is space in the ring for the next send.
	 */
	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);

	spin_unlock_irqrestore(&adapter->tx_lock, flags);

#endif
	return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, TRUE);
	ixgb_up(adapter);
}

/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	return &adapter->net_stats;
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;


	if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	   || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
		return -EINVAL;
	}

	adapter->rx_buffer_len = max_frame;

	netdev->mtu = new_mtu;

	if ((old_max_frame != max_frame) && netif_running(netdev)) {
		ixgb_down(adapter, TRUE);
		ixgb_up(adapter);
	}

	return 0;
}

/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/

void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	   (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		if(multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */

	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
		/* adapter->stats.rnbc + */ adapter->stats.crcerrs +
		adapter->stats.ruc +
		adapter->stats.roc /*+ adapter->stats.rlec */ +
		adapter->stats.icbc +
		adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}

#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
	unsigned int i;
#endif

	if(unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
		mod_timer(&adapter->watchdog_timer, jiffies);
	}

#ifdef CONFIG_IXGB_NAPI
	if(netif_rx_schedule_prep(netdev)) {

		/* Disable interrupts and register for poll. The flush
		   of the posted write is intentionally left out.
		*/

		atomic_inc(&adapter->irq_sem);
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__netif_rx_schedule(netdev);
	}
#else
	/* yes, that is actually a & and it is meant to make sure that
	 * every pass through this for loop checks both receive and
	 * transmit queues for completed descriptors, intended to
	 * avoid starvation issues and assist tx/rx fairness. */
	for(i = 0; i < IXGB_MAX_INTR; i++)
		if(!ixgb_clean_rx_irq(adapter) &
		   !ixgb_clean_tx_irq(adapter))
			break;
#endif
	return IRQ_HANDLED;
}

#ifdef CONFIG_IXGB_NAPI
/**
 * ixgb_clean - NAPI Rx polling callback
 * @netdev: network interface device structure
 **/

static int
ixgb_clean(struct net_device *netdev, int *budget)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int work_to_do = min(*budget, netdev->quota);
	int tx_cleaned;
	int work_done = 0;

	tx_cleaned = ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, work_to_do);

	*budget -= work_done;
	netdev->quota -= work_done;

	/* if no Tx and not enough Rx work done, exit the polling mode */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		ixgb_irq_enable(adapter);
		return 0;
	}

	return 1;
}
#endif

/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static boolean_t
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	boolean_t cleaned = FALSE;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		for(cleaned = FALSE; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts
			    & (IXGB_TX_DESC_POPTS_TXSM |
			       IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(uint32_t *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if(++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(netif_queue_stopped(netdev))) {
		spin_lock(&adapter->tx_lock);
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
		    (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
			netif_wake_queue(netdev);
		spin_unlock(&adapter->tx_lock);
	}

	if(adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = FALSE;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			 IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
        if(adapter->detect_tx_hung) {
                /* detect a transmit hang in hardware; this serializes the
                 * check with the clearing of time_stamp and movement of i */
                adapter->detect_tx_hung = FALSE;
                if (tx_ring->buffer_info[eop].dma &&
                   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
                   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
                        IXGB_STATUS_TXOFF)) {
                        /* detected Tx unit hang */
                        DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                                        "  TDH                  <%x>\n"
                                        "  TDT                  <%x>\n"
                                        "  next_to_use          <%x>\n"
                                        "  next_to_clean        <%x>\n"
                                        "buffer_info[next_to_clean]\n"
                                        "  time_stamp           <%lx>\n"
                                        "  next_to_watch        <%x>\n"
                                        "  jiffies              <%lx>\n"
                                        "  next_to_watch.status <%x>\n",
                                IXGB_READ_REG(&adapter->hw, TDH),
                                IXGB_READ_REG(&adapter->hw, TDT),
                                tx_ring->next_to_use,
                                tx_ring->next_to_clean,
                                tx_ring->buffer_info[eop].time_stamp,
                                eop,
                                jiffies,
                                eop_desc->status);
                        netif_stop_queue(netdev);
                }
        }

        return cleaned;
}

/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/

static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
                 struct ixgb_rx_desc *rx_desc,
                 struct sk_buff *skb)
{
        /* Ignore Checksum bit is set OR
         * TCP Checksum has not been calculated
         */
        if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
           (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }

        /* At this point we know the hardware did the TCP checksum */
        /* now look at the TCP checksum error bit */
        if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
                /* let the stack verify checksum errors */
                skb->ip_summed = CHECKSUM_NONE;
                adapter->hw_csum_rx_error++;
        } else {
                /* TCP checksum is good */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                adapter->hw_csum_rx_good++;
        }
}

/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 **/

static boolean_t
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_rx_desc *rx_desc, *next_rxd;
        struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
        uint32_t length;
        unsigned int i, j;
        boolean_t cleaned = FALSE;

        i = rx_ring->next_to_clean;
        rx_desc = IXGB_RX_DESC(*rx_ring, i);
        buffer_info = &rx_ring->buffer_info[i];

        while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
                struct sk_buff *skb, *next_skb;
                u8 status;

#ifdef CONFIG_IXGB_NAPI
                if(*work_done >= work_to_do)
                        break;

                (*work_done)++;
#endif
                status = rx_desc->status;
                skb = buffer_info->skb;
                buffer_info->skb = NULL;

                prefetch(skb->data);

                if(++i == rx_ring->count) i = 0;
                next_rxd = IXGB_RX_DESC(*rx_ring, i);
                prefetch(next_rxd);

                if((j = i + 1) == rx_ring->count) j = 0;
                next2_buffer = &rx_ring->buffer_info[j];
                prefetch(next2_buffer);

                next_buffer = &rx_ring->buffer_info[i];
                next_skb = next_buffer->skb;
                prefetch(next_skb);

                cleaned = TRUE;

                pci_unmap_single(pdev,
                                 buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_FROMDEVICE);

                length = le16_to_cpu(rx_desc->length);

                if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

                        /* All receives must fit into a single buffer */

                        IXGB_DBG("Receive packet consumed multiple buffers "
                                 "length<%x>\n", length);

                        dev_kfree_skb_irq(skb);
                        goto rxdesc_done;
                }
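                /* Descriptors with any of the hardware receive error bits
                 * set (CE/SE/P/RXE) are dropped here rather than handed to
                 * the stack; error accounting is done from the hardware
                 * statistics registers in ixgb_update_stats() instead. */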
                if (unlikely(rx_desc->errors
                             & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
                                | IXGB_RX_DESC_ERRORS_P |
                                IXGB_RX_DESC_ERRORS_RXE))) {

                        dev_kfree_skb_irq(skb);
                        goto rxdesc_done;
                }

                /* code added for copybreak; this should improve
                 * performance for small packets with large amounts
                 * of reassembly being done in the stack */
#define IXGB_CB_LENGTH 256
                if (length < IXGB_CB_LENGTH) {
                        struct sk_buff *new_skb =
                            netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
                                                               (skb->data -
                                                                NET_IP_ALIGN),
                                                               (length +
                                                                NET_IP_ALIGN));
                                /* save the skb in buffer_info as good */
                                buffer_info->skb = skb;
                                skb = new_skb;
                        }
                }
                /* end copybreak code */

                /* Good Receive */
                skb_put(skb, length);

                /* Receive Checksum Offload */
                ixgb_rx_checksum(adapter, rx_desc, skb);

                skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
                if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
                                le16_to_cpu(rx_desc->special) &
                                        IXGB_RX_DESC_SPECIAL_VLAN_MASK);
                } else {
                        netif_receive_skb(skb);
                }
#else /* CONFIG_IXGB_NAPI */
                if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
                        vlan_hwaccel_rx(skb, adapter->vlgrp,
                                le16_to_cpu(rx_desc->special) &
                                        IXGB_RX_DESC_SPECIAL_VLAN_MASK);
                } else {
                        netif_rx(skb);
                }
#endif /* CONFIG_IXGB_NAPI */
                netdev->last_rx = jiffies;

rxdesc_done:
                /* clean up descriptor, might be written over by hw */
                rx_desc->status = 0;

                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;
        }

        rx_ring->next_to_clean = i;

        ixgb_alloc_rx_buffers(adapter);

        return cleaned;
}

/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_rx_desc *rx_desc;
        struct ixgb_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        int num_group_tail_writes;
        long cleancount;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
        cleancount = IXGB_DESC_UNUSED(rx_ring);

        num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
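        /* Refill strategy: if the copybreak path in ixgb_clean_rx_irq()
         * left a full-sized skb behind in buffer_info->skb, it is recycled
         * (trimmed and remapped); otherwise a new skb is allocated.  The
         * loop stops early when an allocation fails and records the
         * failure in alloc_rx_buff_failed. */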
        /* leave three descriptors unused */
        while(--cleancount > 2) {
                /* recycle! it's good for you */
                skb = buffer_info->skb;
                if (skb) {
                        skb_trim(skb, 0);
                        goto map_skb;
                }

                skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
                                       + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }

                /* Make buffer alignment 2 beyond a 16 byte boundary;
                 * this will result in a 16 byte aligned IP header after
                 * the 14 byte MAC header is removed
                 */
                skb_reserve(skb, NET_IP_ALIGN);

                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
map_skb:
                buffer_info->dma = pci_map_single(pdev,
                                                  skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);

                rx_desc = IXGB_RX_DESC(*rx_ring, i);
                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                rx_desc->status = 0;

                if(++i == rx_ring->count) i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

        if (likely(rx_ring->next_to_use != i)) {
                rx_ring->next_to_use = i;
                if (unlikely(i-- == 0))
                        i = (rx_ring->count - 1);

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
                IXGB_WRITE_REG(&adapter->hw, RDT, i);
        }
}

/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 * @netdev: network interface device structure
 * @grp: indicates whether to enable or disable tagging/stripping
 **/
static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        uint32_t ctrl, rctl;

        ixgb_irq_disable(adapter);
        adapter->vlgrp = grp;

        if(grp) {
                /* enable VLAN tag insert/strip */
                ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
                ctrl |= IXGB_CTRL0_VME;
                IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

                /* enable VLAN receive filtering */

                rctl = IXGB_READ_REG(&adapter->hw, RCTL);
                rctl |= IXGB_RCTL_VFE;
                rctl &= ~IXGB_RCTL_CFIEN;
                IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
        } else {
                /* disable VLAN tag insert/strip */

                ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
                ctrl &= ~IXGB_CTRL0_VME;
                IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

                /* disable VLAN filtering */

                rctl = IXGB_READ_REG(&adapter->hw, RCTL);
                rctl &= ~IXGB_RCTL_VFE;
                IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
        }

        ixgb_irq_enable(adapter);
}

static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        uint32_t vfta, index;

        /* add VID to filter table */

        index = (vid >> 5) & 0x7F;
        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
        vfta |= (1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
}
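/*
 * The VLAN filter table (VFTA) is a bitmap spread across an array of 32-bit
 * registers: bits [4:0] of the VID select a bit within a register and bits
 * [11:5] select the register, so 128 registers cover all 4096 possible VIDs.
 * Both ixgb_vlan_rx_add_vid() above and ixgb_vlan_rx_kill_vid() below
 * read-modify-write the affected register through ixgb_write_vfta().
 */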
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        uint32_t vfta, index;

        ixgb_irq_disable(adapter);

        vlan_group_set_device(adapter->vlgrp, vid, NULL);

        ixgb_irq_enable(adapter);

        /* remove VID from filter table */

        index = (vid >> 5) & 0x7F;
        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
        vfta &= ~(1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
}

static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
        ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

        if(adapter->vlgrp) {
                uint16_t vid;
                for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
                        if(!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        ixgb_vlan_rx_add_vid(adapter->netdev, vid);
                }
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void ixgb_netpoll(struct net_device *dev)
{
        struct ixgb_adapter *adapter = netdev_priv(dev);

        disable_irq(adapter->pdev->irq);
        ixgb_intr(adapter->pdev->irq, dev);
        enable_irq(adapter->pdev->irq);
}
#endif

/**
 * ixgb_io_error_detected - called when a PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
                                                enum pci_channel_state state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        if(netif_running(netdev))
                ixgb_down(adapter, TRUE);

        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        if(pci_enable_device(pdev)) {
                DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        /* Perform card reset only on one instance of the card */
        if (0 != PCI_FUNC (pdev->devfn))
                return PCI_ERS_RESULT_RECOVERED;

        pci_set_master(pdev);

        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
        ixgb_reset(adapter);

        /* Make sure the EEPROM is good */
        if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
                DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

        if(!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return PCI_ERS_RESULT_RECOVERED;
}
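/*
 * Together with ixgb_io_error_detected() and ixgb_io_slot_reset() above,
 * ixgb_io_resume() below completes the PCI error recovery sequence: the
 * interface is taken down and the device disabled when an error is reported,
 * the hardware is re-initialised and its EEPROM and MAC address revalidated
 * after the slot reset, and the interface is brought back up and reattached
 * once the PCI layer signals that normal operation may resume.
 */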
/**
 * ixgb_io_resume - called when it is OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it is OK to resume
 * normal operation. Implementation resembles the second half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume (struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        pci_set_master(pdev);

        if(netif_running(netdev)) {
                if(ixgb_up(adapter)) {
                        printk ("ixgb: can't bring device back up after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);
        mod_timer(&adapter->watchdog_timer, jiffies);
}

/* ixgb_main.c */