/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong.huang@atheros.com>
 * Jie Yang <jie.yang@atheros.com>
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver.
 *
 * TODO:
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * NEEDS TESTING:
 * VLAN
 * multicast
 * promiscuous mode
 * interrupt coalescing
 * SMP torture testing
 */

#include <asm/atomic.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include "atl1.h"

#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATLX_DRIVER_VERSION);

/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"

/*
 * This is the only thing that needs to be changed to adjust the
 * maximum number of ports that the driver can manage.
 */
#define ATL1_MAX_NIC 4

#define OPTION_UNSET    -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED  1

#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
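/*
 * Illustrative expansion (comment only, not compiled): with
 * ATL1_MAX_NIC == 4, the GCC range designator above initializes all
 * ATL1_MAX_NIC + 1 == 5 slots of the parameter array:
 *
 *	int x[ATL1_MAX_NIC + 1] = { [0 ... 4] = OPTION_UNSET };
 *	// equivalent to { -1, -1, -1, -1, -1 }
 */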

/*
 * Interrupt moderation timer, in units of 2 us
 *
 * Valid Range: 50-65000 (MIN_INT_MOD_CNT to MAX_INT_MOD_CNT)
 *
 * Default Value: 100 (200 us)
 */
static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static unsigned int num_int_mod_timer;
module_param_array_named(int_mod_timer, int_mod_timer, int,
	&num_int_mod_timer, 0);
MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");

#define DEFAULT_INT_MOD_CNT	100	/* 200 us */
#define MAX_INT_MOD_CNT		65000
#define MIN_INT_MOD_CNT		50

struct atl1_option {
	enum { enable_option, range_option, list_option } type;
	char *name;
	char *err;
	int def;
	union {
		struct {	/* range_option info */
			int min;
			int max;
		} r;
		struct {	/* list_option info */
			int nr;
			struct atl1_opt_list {
				int i;
				char *str;
			} *p;
		} l;
	} arg;
};

static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
	struct pci_dev *pdev)
{
	if (*value == OPTION_UNSET) {
		*value = opt->def;
		return 0;
	}

	switch (opt->type) {
	case enable_option:
		switch (*value) {
		case OPTION_ENABLED:
			dev_info(&pdev->dev, "%s enabled\n", opt->name);
			return 0;
		case OPTION_DISABLED:
			dev_info(&pdev->dev, "%s disabled\n", opt->name);
			return 0;
		}
		break;
	case range_option:
		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
			dev_info(&pdev->dev, "%s set to %i\n", opt->name,
				*value);
			return 0;
		}
		break;
	case list_option:{
			int i;
			struct atl1_opt_list *ent;

			for (i = 0; i < opt->arg.l.nr; i++) {
				ent = &opt->arg.l.p[i];
				if (*value == ent->i) {
					if (ent->str[0] != '\0')
						dev_info(&pdev->dev, "%s\n",
							ent->str);
					return 0;
				}
			}
		}
		break;

	default:
		break;
	}

	dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
		opt->name, *value, opt->err);
	*value = opt->def;
	return -1;
}
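/*
 * Usage sketch (illustrative): the int_mod_timer module parameter
 * declared above is filled from the command line at load time, one
 * entry per board, and atl1_check_options() below runs each entry
 * through atl1_validate_option():
 *
 *	modprobe atl1 int_mod_timer=100,200
 *
 * gives board 0 a 100 * 2 us == 200 us moderation interval and board 1
 * a 400 us one; unset boards fall back to DEFAULT_INT_MOD_CNT.
 */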
/*
 * atl1_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input.  If an invalid value is given, or if no user specified
 * value exists, a default value is used.  The final value is stored
 * in a variable in the adapter structure.
 */
static void __devinit atl1_check_options(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int bd = adapter->bd_number;
	if (bd >= ATL1_MAX_NIC) {
		dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
		dev_notice(&pdev->dev, "using defaults for all values\n");
	}
	{	/* Interrupt Moderate Timer */
		struct atl1_option opt = {
			.type = range_option,
			.name = "Interrupt Moderator Timer",
			.err = "using default of "
				__MODULE_STRING(DEFAULT_INT_MOD_CNT),
			.def = DEFAULT_INT_MOD_CNT,
			.arg = {.r = {.min = MIN_INT_MOD_CNT,
					.max = MAX_INT_MOD_CNT} }
		};
		int val;
		if (num_int_mod_timer > bd) {
			val = int_mod_timer[bd];
			atl1_validate_option(&val, &opt, pdev);
			adapter->imt = (u16) val;
		} else
			adapter->imt = (u16) (opt.def);
	}
}

/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);

static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");

/*
 * Resets the transmit and receive units; masks and clears all interrupts.
 * hw - Struct containing variables accessed by shared code
 * return: 0 on success, or the nonzero idle status on error
 */
static s32 atl1_reset_hw(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	u32 icr;
	int i;

	/*
	 * Clear the interrupt mask to stop the board from generating
	 * interrupts, and clear any pending interrupt events.
	 */
	/*
	 * iowrite32(0, hw->hw_addr + REG_IMR);
	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
	 */

	/*
	 * Issue a soft reset to the MAC.  This resets the chip's
	 * transmit, receive, and DMA units.  It does not affect
	 * the current PCI configuration.  The global reset bit is
	 * self-clearing, and should clear within a microsecond.
	 */
	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
	ioread32(hw->hw_addr + REG_MASTER_CTRL);

	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
	ioread16(hw->hw_addr + REG_PHY_ENABLE);

	/* delay about 1 ms */
	msleep(1);

	/* Wait at least 10 ms for all modules to be idle */
	for (i = 0; i < 10; i++) {
		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
		if (!icr)
			break;
		/* delay 1 ms */
		msleep(1);
		cpu_relax();
	}

	if (icr) {
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
		return icr;
	}

	return 0;
}

/*
 * EEPROM support functions
 *
 * atl1_check_eeprom_exist
 * Returns 0 if an EEPROM is present, 1 otherwise.
 */
static int atl1_check_eeprom_exist(struct atl1_hw *hw)
{
	u32 value;
	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
	if (value & SPI_FLASH_CTRL_EN_VPD) {
		value &= ~SPI_FLASH_CTRL_EN_VPD;
		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	}

	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}

static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
{
	int i;
	u32 control;

	if (offset & 3)
		/* address is not dword aligned */
		return false;

	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
	ioread32(hw->hw_addr + REG_VPD_CAP);

	for (i = 0; i < 10; i++) {
		msleep(2);
		control = ioread32(hw->hw_addr + REG_VPD_CAP);
		if (control & VPD_CAP_VPD_FLAG)
			break;
	}
	if (control & VPD_CAP_VPD_FLAG) {
		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
		return true;
	}
	/* timeout */
	return false;
}

/*
 * Reads the value of a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 * phy_data - receives the register value
 */
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
		MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		*phy_data = (u16) val;
		return 0;
	}
	return ATLX_ERR_PHY;
}

#define CUSTOM_SPI_CS_SETUP	2
#define CUSTOM_SPI_CLK_HI	2
#define CUSTOM_SPI_CLK_LO	2
#define CUSTOM_SPI_CS_HOLD	2
#define CUSTOM_SPI_CS_HI	3

static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
{
	int i;
	u32 value;

	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);

	value = SPI_FLASH_CTRL_WAIT_READY |
		(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
		SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
		SPI_FLASH_CTRL_CLK_HI_MASK) <<
		SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
		SPI_FLASH_CTRL_CLK_LO_MASK) <<
		SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
		SPI_FLASH_CTRL_CS_HOLD_MASK) <<
		SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
		SPI_FLASH_CTRL_CS_HI_MASK) <<
		SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
		SPI_FLASH_CTRL_INS_SHIFT;

	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);

	value |= SPI_FLASH_CTRL_START;
	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);

	for (i = 0; i < 10; i++) {
		msleep(1);
		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
		if (!(value & SPI_FLASH_CTRL_START))
			break;
	}

	if (value & SPI_FLASH_CTRL_START)
		return false;

	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);

	return true;
}
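/*
 * Record format note (inferred from the parser below, not from vendor
 * documentation): the MAC address is stored in the EEPROM/flash as a
 * sequence of 32-bit words.  A word whose low byte is the signature
 * 0x5A carries a register offset in its high 16 bits, and the *next*
 * word holds the value for that register; a word with any other low
 * byte terminates the table.
 */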
/*
 * atl1_get_permanent_address
 * Returns 0 if a valid MAC address was obtained, nonzero otherwise.
 */
static int atl1_get_permanent_address(struct atl1_hw *hw)
{
	u32 addr[2];
	u32 i, control;
	u16 reg;
	u8 eth_addr[ETH_ALEN];
	bool key_valid;

	if (is_valid_ether_addr(hw->perm_mac_addr))
		return 0;

	/* init */
	addr[0] = addr[1] = 0;

	if (!atl1_check_eeprom_exist(hw)) {
		reg = 0;
		key_valid = false;
		/* Read out all EEPROM content */
		i = 0;
		while (1) {
			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
				if (key_valid) {
					if (reg == REG_MAC_STA_ADDR)
						addr[0] = control;
					else if (reg == (REG_MAC_STA_ADDR + 4))
						addr[1] = control;
					key_valid = false;
				} else if ((control & 0xff) == 0x5A) {
					key_valid = true;
					reg = (u16) (control >> 16);
				} else
					break;
			} else
				/* read error */
				break;
			i += 4;
		}

		*(u32 *) &eth_addr[2] = swab32(addr[0]);
		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
		if (is_valid_ether_addr(eth_addr)) {
			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
			return 0;
		}
	}

	/* see if SPI flash exists */
	addr[0] = addr[1] = 0;
	reg = 0;
	key_valid = false;
	i = 0;
	while (1) {
		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
			if (key_valid) {
				if (reg == REG_MAC_STA_ADDR)
					addr[0] = control;
				else if (reg == (REG_MAC_STA_ADDR + 4))
					addr[1] = control;
				key_valid = false;
			} else if ((control & 0xff) == 0x5A) {
				key_valid = true;
				reg = (u16) (control >> 16);
			} else
				/* data end */
				break;
		} else
			/* read error */
			break;
		i += 4;
	}

	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	/*
	 * On some motherboards, the MAC address is written by the
	 * BIOS directly to the MAC register during POST, and is
	 * not stored in eeprom.  If all else thus far has failed
	 * to fetch the permanent MAC address, try reading it directly.
	 */
	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	return 1;
}

/*
 * Reads the adapter's MAC address from the EEPROM
 * hw - Struct containing variables accessed by shared code
 */
static s32 atl1_read_mac_addr(struct atl1_hw *hw)
{
	u16 i;

	if (atl1_get_permanent_address(hw))
		random_ether_addr(hw->perm_mac_addr);

	for (i = 0; i < ETH_ALEN; i++)
		hw->mac_addr[i] = hw->perm_mac_addr[i];
	return 0;
}

/*
 * Hashes an address to determine its location in the multicast table
 * hw - Struct containing variables accessed by shared code
 * mc_addr - the multicast address to hash
 *
 * atl1_hash_mc_addr
 * purpose
 *	compute the hash value for a multicast address
 * hash calculation:
 *	1. compute the 32-bit CRC of the multicast address
 *	2. reverse the bit order of the CRC (MSB becomes LSB)
 */
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
	u32 crc32, value = 0;
	int i;

	crc32 = ether_crc_le(6, mc_addr);
	for (i = 0; i < 32; i++)
		value |= (((crc32 >> i) & 1) << (31 - i));

	return value;
}
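/*
 * Worked example (illustrative): for a CRC of 0x8C000001 (bits 31, 27,
 * 26 and 0 set), the loop above mirrors the bit order, yielding
 * 0x80000031 (bits 0, 4, 5 and 31 set).  atl1_hash_set() below then
 * decodes that value as
 *	hash_reg = (0x80000031 >> 31) & 0x1  == 1   (second register)
 *	hash_bit = (0x80000031 >> 26) & 0x1F == 0   (bit 0)
 */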
/*
 * Sets the bit in the multicast table corresponding to the hash value.
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 */
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table is a register array of 2 32-bit registers.
	 * It is treated like an array of 64 bits.  We want to set
	 * bit BitArray[hash_value].  So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The register is selected by the most
	 * significant bit of the hash value, and the bit within that
	 * register by the next five bits (30:26).
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}

/*
 * Writes a value to a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * phy_data - data to write to the PHY
 */
static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
		(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
		MDIO_SUP_PREAMBLE |
		MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	return ATLX_ERR_PHY;
}

/*
 * Forces the L001's PHY out of its power-saving state (hardware bug
 * workaround)
 * hw - Struct containing variables accessed by shared code
 * At power-on the L001's PHY is always in a power-saving state in
 * which gigabit link is unavailable.
 */
static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
{
	s32 ret;
	ret = atl1_write_phy_reg(hw, 29, 0x0029);
	if (ret)
		return ret;
	return atl1_write_phy_reg(hw, 30, 0);
}
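#if 0
/*
 * Usage sketch for the MDIO helpers above (illustrative only, kept out
 * of the build): read the first PHY ID register and, on success,
 * restart autonegotiation.  MII_PHYSID1, BMCR_ANENABLE and
 * BMCR_ANRESTART come from <linux/mii.h>.
 */
static s32 atl1_example_restart_aneg(struct atl1_hw *hw)
{
	u16 id1;
	s32 ret = atl1_read_phy_reg(hw, MII_PHYSID1, &id1);

	if (ret)
		return ret;	/* MDIO transaction timed out */
	return atl1_write_phy_reg(hw, MII_BMCR,
			BMCR_ANENABLE | BMCR_ANRESTART);
}
#endif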
/*
 * Resets the PHY and applies the configured link settings
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bits 15 and 12 of the MII control register (for the F001 bug)
 */
static s32 atl1_phy_reset(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;
	u16 phy_data;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			    MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}

	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
	if (ret_val) {
		u32 val;
		int i;
		/* the PCIe SerDes link may be down */
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "pcie phy link down\n");

		for (i = 0; i < 25; i++) {
			msleep(1);
			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
			if (!(val & (MDIO_START | MDIO_BUSY)))
				break;
		}

		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
			if (netif_msg_hw(adapter))
				dev_warn(&pdev->dev,
					"pcie link down for at least 25ms\n");
			return ret_val;
		}
	}
	return 0;
}

/*
 * Configures PHY autoneg and flow control advertisement settings
 * hw - Struct containing variables accessed by shared code
 */
static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
{
	s32 ret_val;
	s16 mii_autoneg_adv_reg;
	s16 mii_1000t_ctrl_reg;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;

	/* Read the MII 1000Base-T Control Register (Address 9). */
	mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;

	/*
	 * First we clear all the 10/100 Mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 Mb speed bits in
	 * the 1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
	mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;

	/*
	 * We need to parse media_type and set up
	 * the appropriate PHY registers.
	 */
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
					MII_AR_10T_FD_CAPS |
					MII_AR_100TX_HD_CAPS |
					MII_AR_100TX_FD_CAPS);
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_1000M_FULL:
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_FULL:
		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_HALF:
		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
		break;

	case MEDIA_TYPE_10M_FULL:
		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
		break;

	default:
		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
		break;
	}

	/* flow control fixed to enable all */
	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);

	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;

	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
	if (ret_val)
		return ret_val;

	return 0;
}
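/*
 * For example (illustrative): forcing MEDIA_TYPE_100M_FULL in the
 * switch above leaves only MII_AR_100TX_FD_CAPS set among the speed
 * bits, plus the MII_AR_ASM_DIR | MII_AR_PAUSE flow-control bits that
 * are always added, so a compliant link partner can resolve nothing
 * other than 100 Mbps full duplex.
 */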
/*
 * Configures link settings.
 * hw - Struct containing variables accessed by shared code
 * Assumes the hardware has previously been reset and the
 * transmitter and receiver are not enabled.
 */
static s32 atl1_setup_link(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;

	/*
	 * Options:
	 * The PHY will advertise value(s) parsed from
	 * autoneg_advertised and fc.
	 * Regardless of the autoneg setting, we do not wait for the
	 * link result.
	 */
	ret_val = atl1_phy_setup_autoneg_adv(hw);
	if (ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&pdev->dev,
				"error setting up autonegotiation\n");
		return ret_val;
	}
	/* software reset; re-enable autoneg if needed */
	ret_val = atl1_phy_reset(hw);
	if (ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&pdev->dev, "error resetting phy\n");
		return ret_val;
	}
	hw->phy_configured = true;
	return ret_val;
}

static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
		/* Atmel */
		hw->flash_vendor = 0;

	/* Init OP table */
	iowrite8(flash_table[hw->flash_vendor].cmd_program,
		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_read,
		hw->hw_addr + REG_SPI_FLASH_OP_READ);
}

/*
 * Performs basic configuration of the adapter.
 * hw - Struct containing variables accessed by shared code
 * Assumes that the controller has previously been reset and is in a
 * post-reset uninitialized state.  Initializes the multicast table
 * and calls routines to set up the link.
 * Leaves the transmit and receive units disabled and uninitialized.
 */
static s32 atl1_init_hw(struct atl1_hw *hw)
{
	u32 ret_val = 0;

	/* Zero out the Multicast HASH table */
	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
	/* clear the old settings from the multicast hash table */
	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));

	atl1_init_flash_opcode(hw);

	if (!hw->phy_configured) {
		/* enable GPHY LinkChange Interrupt */
		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
		if (ret_val)
			return ret_val;
		/* make PHY out of power-saving state */
		ret_val = atl1_phy_leave_power_saving(hw);
		if (ret_val)
			return ret_val;
		/* Call a subroutine to configure the link */
		ret_val = atl1_setup_link(hw);
	}
	return ret_val;
}
/*
 * Detects the current speed and duplex settings of the hardware.
 * hw - Struct containing variables accessed by shared code
 * speed - Speed of the connection
 * duplex - Duplex setting of the connection
 */
static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;
	u16 phy_data;

	/* Read the PHY Specific Status Register (17) */
	ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
	if (ret_val)
		return ret_val;

	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
		return ATLX_ERR_PHY_RES;

	switch (phy_data & MII_ATLX_PSSR_SPEED) {
	case MII_ATLX_PSSR_1000MBS:
		*speed = SPEED_1000;
		break;
	case MII_ATLX_PSSR_100MBS:
		*speed = SPEED_100;
		break;
	case MII_ATLX_PSSR_10MBS:
		*speed = SPEED_10;
		break;
	default:
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "error getting speed\n");
		return ATLX_ERR_PHY_SPEED;
	}
	if (phy_data & MII_ATLX_PSSR_DPLX)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	return 0;
}

void atl1_set_mac_addr(struct atl1_hw *hw)
{
	u32 value;
	/*
	 * Byte layout example for MAC address 00-0B-6A-F6-00-DC:
	 *   low dword  (reg + 0): 0x6AF600DC
	 *   high dword (reg + 4): 0x0000000B
	 */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	/* high dword */
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}
/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->wol = 0;
	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
	adapter->ict = 50000;		/* 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->ipgt = 0x60;
	hw->min_ifg = 0x50;
	hw->ipgr1 = 0x40;
	hw->ipgr2 = 0x60;
	hw->max_retry = 0xf;
	hw->lcol = 0x37;
	hw->jam_ipg = 7;
	hw->rfd_burst = 8;
	hw->rrd_burst = 8;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;
	hw->tpd_burst = 4;
	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;
	hw->cmb_rrd = 4;
	hw->cmb_tpd = 4;
	hw->cmb_rx_timer = 1;	/* about 2us */
	hw->cmb_tx_timer = 1;	/* about 2us */
	hw->smb_timer = 100000;	/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}

static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

	return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
	int val)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_write_phy_reg(&adapter->hw, reg_num, val);
}

/*
 * atl1_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to the ifreq carrying the MII request
 * @cmd: ioctl command
 */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	int retval;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->lock, flags);
	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
	spin_unlock_irqrestore(&adapter->lock, flags);

	return retval;
}
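/*
 * Note on the rounding idiom in atl1_sw_init() above (illustrative
 * arithmetic): rx_buffer_len = (max_frame_size + 7) & ~7 rounds up to
 * the next multiple of 8.  With a 1500-byte MTU, max_frame_size is
 * 1500 + 14 + 4 + 4 = 1522, so the receive buffer becomes
 * (1522 + 7) & ~7 = 1528 bytes, keeping buffer lengths 8-byte aligned.
 */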
/*
 * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
	if (unlikely(!tpd_ring->buffer_info)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
				size);
		goto err_nomem;
	}
	rfd_ring->buffer_info =
		(struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);

	/*
	 * real ring DMA buffer
	 * each ring/block may need up to 8 bytes for alignment, hence the
	 * additional 40 bytes tacked onto the end.
	 */
	ring_header->size = size =
		sizeof(struct tx_packet_desc) * tpd_ring->count
		+ sizeof(struct rx_free_desc) * rfd_ring->count
		+ sizeof(struct rx_return_desc) * rrd_ring->count
		+ sizeof(struct coals_msg_block)
		+ sizeof(struct stats_msg_block)
		+ 40;

	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
		&ring_header->dma);
	if (unlikely(!ring_header->desc)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
		goto err_nomem;
	}

	memset(ring_header->desc, 0, ring_header->size);

	/* init TPD ring */
	tpd_ring->dma = ring_header->dma;
	offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
	tpd_ring->dma += offset;
	tpd_ring->desc = (u8 *) ring_header->desc + offset;
	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

	/* init RFD ring */
	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
	rfd_ring->dma += offset;
	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

	/* init RRD ring */
	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
	rrd_ring->dma += offset;
	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
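	/*
	 * Alignment example (illustrative): if a block's DMA address
	 * ends in 0x4, then (dma & 0x7) == 4 and offset = 8 - 4 = 4,
	 * bumping the block to the next 8-byte boundary; an address
	 * already ending in 0x0 or 0x8 gets offset 0.  Five blocks at
	 * up to 8 bytes of padding each is why 40 spare bytes were
	 * added to ring_header->size above.
	 */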

	/* init CMB */
	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
	adapter->cmb.dma += offset;
	adapter->cmb.cmb = (struct coals_msg_block *)
		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

	/* init SMB */
	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
	adapter->smb.dma += offset;
	adapter->smb.smb = (struct stats_msg_block *)
		((u8 *) adapter->cmb.cmb +
		(sizeof(struct coals_msg_block) + offset));

	return 0;

err_nomem:
	kfree(tpd_ring->buffer_info);
	return -ENOMEM;
}

static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}
	}

	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->skb) {
			dev_kfree_skb_any(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * tpd_ring->count;
	memset(tpd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tpd_ring->desc, 0, tpd_ring->size);

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);
}
/*
 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Frees all transmit and receive software resources.
 */
static void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	kfree(tpd_ring->buffer_info);
	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
		ring_header->dma);

	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;
	tpd_ring->dma = 0;

	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;
	rfd_ring->dma = 0;

	rrd_ring->desc = NULL;
	rrd_ring->dma = 0;

	adapter->cmb.dma = 0;
	adapter->cmb.cmb = NULL;

	adapter->smb.dma = 0;
	adapter->smb.smb = NULL;
}

static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
	u32 value;
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;
	/* speed */
	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
		MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		MAC_CTRL_SPEED_SHIFT);
	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	/* preamble length */
	value |= (((u32) adapter->hw.preamble_len
		& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
	/* vlan */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;
	/* rx checksum
	if (adapter->rx_csum)
		value |= MAC_CTRL_RX_CHKSUM_EN;
	*/
	/* filter mode */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	/* value |= MAC_CTRL_LOOPBACK; */
	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must be read twice to get the current link status */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* the resolved link matches our configured setting */
	if (!reconfig) {
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev,
					"%s link is up %d Mbps %s\n",
					netdev->name, adapter->link_speed,
					adapter->link_duplex == FULL_DUPLEX ?
					"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
		}
		return 0;
	}

	/* the resolved link does not match; take the link down first */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* autoneg: schedule a timer to reconfigure the PHY */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer,
			round_jiffies(jiffies + 3 * HZ));
	}

	return 0;
}

static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
	u32 hi, lo, value;

	/* RFD Flow Control */
	value = adapter->rfd_ring.count;
	hi = value / 16;
	if (hi < 2)
		hi = 2;
	lo = value * 7 / 8;

	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = adapter->rrd_ring.count;
	lo = value / 16;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

static void set_flow_ctrl_new(struct atl1_hw *hw)
{
	u32 hi, lo, value;

	/* RXF Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
	lo = value / 16;
	if (lo < 192)
		lo = 192;
	hi = value * 7 / 8;
	if (hi < lo)
		hi = lo + 16;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
	lo = value / 8;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	if (hi < lo)
		hi = lo + 3;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
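/*
 * Worked example (illustrative) for set_flow_ctrl_old(): with a
 * 256-entry RFD ring -- the count is chosen here only for the
 * arithmetic -- the pause thresholds written to the register are
 * hi = 256 / 16 = 16 and lo = 256 * 7 / 8 = 224 descriptors.  The
 * floor of 2 keeps the thresholds sane for tiny rings, and
 * set_flow_ctrl_new() applies analogous floors (192, 2) to the
 * SRAM-derived values.
 */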
/*
 * atl1_configure - Configure Transmit & Receive Units after Reset
 * @adapter: board private structure
 *
 * Configures the Tx/Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* tx / rx ring */

	/* HI base address (upper 32 bits, shared by all rings) */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		<< MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config Half-Duplex Control */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
		<< RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

	/* Flow Control */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min TPD fetch gap & Tx jumbo packet size threshold for task offload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

	/* config CMB / SMB */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1;	/* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
	return value;
}

/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
	u32 value;

	/* much vendor magic here */
	value = 0x6500;
	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
	/* pcie flow control mode change */
	value = ioread32(adapter->hw.hw_addr + 0x1008);
	value |= 0x8000;
	iowrite32(value, adapter->hw.hw_addr + 0x1008);
}

/*
 * After ACPI resume on some VIA motherboards, the Interrupt Disable
 * bit (0x400) in the PCI Command register is left set, so no INTx
 * interrupts are delivered.  This function clears that bit.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
	unsigned long value;

	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
	if (value & PCI_COMMAND_INTX_DISABLE)
		value &= ~PCI_COMMAND_INTX_DISABLE;
	/* the PCI Command register is 16 bits wide */
	iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND);
}

static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);

	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
	netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
	netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
	netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
	netdev->stats.multicast = adapter->soft_stats.multicast;
	netdev->stats.collisions = adapter->soft_stats.collisions;
	netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
	netdev->stats.rx_over_errors =
		adapter->soft_stats.rx_missed_errors;
	netdev->stats.rx_length_errors =
		adapter->soft_stats.rx_length_errors;
	netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	netdev->stats.rx_frame_errors =
		adapter->soft_stats.rx_frame_errors;
	netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	netdev->stats.rx_missed_errors =
		adapter->soft_stats.rx_missed_errors;
	netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
	netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	netdev->stats.tx_aborted_errors =
		adapter->soft_stats.tx_aborted_errors;
	netdev->stats.tx_window_errors =
		adapter->soft_stats.tx_window_errors;
	netdev->stats.tx_carrier_errors =
		adapter->soft_stats.tx_carrier_errors;
}

static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
	unsigned long flags;
	u32 tpd_next_to_use;
	u32 rfd_next_to_use;
	u32 rrd_next_to_clean;
	u32 value;

	spin_lock_irqsave(&adapter->mb_lock, flags);

	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
		MB_RFD_PROD_INDX_SHIFT) |
		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
		MB_RRD_CONS_INDX_SHIFT) |
		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
		MB_TPD_PROD_INDX_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

	spin_unlock_irqrestore(&adapter->mb_lock, flags);
}
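/*
 * Mailbox layout sketch (illustrative): the three ring indices share
 * one 32-bit doorbell register, so a TPD producer index of 5, an RRD
 * consumer index of 2 and an RFD producer index of 7 are posted as
 *
 *	(7 << MB_RFD_PROD_INDX_SHIFT) |
 *	(2 << MB_RRD_CONS_INDX_SHIFT) |
 *	(5 << MB_TPD_PROD_INDX_SHIFT)
 *
 * and a single posted write updates the hardware's view of all three
 * rings at once.
 */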

static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, u16 offset)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
		if (++rfd_ring->next_to_clean == rfd_ring->count) {
			rfd_ring->next_to_clean = 0;
		}
	}
}

static void atl1_update_rfd_index(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd)
{
	u16 num_buf;

	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
		adapter->rx_buffer_len;
	if (rrd->num_buf == num_buf)
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, num_buf);
}

static void atl1_rx_checksum(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, struct sk_buff *skb)
{
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * The L1 hardware contains a bug that erroneously sets the
	 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
	 * fragmented IP packet is received, even though the packet
	 * is perfectly valid and its checksum is correct.  There's
	 * no way to distinguish between one of these good packets
	 * and a packet that actually contains a TCP/UDP checksum
	 * error, so all we can do is allow it to be handed up to
	 * the higher layers and let it be sorted out there.
	 */

	skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
					ERR_FLAG_CODE | ERR_FLAG_OV)) {
			adapter->hw_csum_err++;
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &pdev->dev,
					"rx checksum error\n");
			return;
		}
	}

	/* not IPv4 */
	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
		return;

	/* IPv4 packet */
	if (likely(!(rrd->err_flg &
			(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
		return;
	}
}
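/*
 * Summary of the checksum policy above (illustrative):
 *	non-IPv4 frame               -> CHECKSUM_NONE, the stack verifies
 *	IPv4, no IP/L4 error flags   -> CHECKSUM_UNNECESSARY
 *	IPv4 with error flags set    -> CHECKSUM_NONE; this may be the
 *	                                fragment false positive noted
 *	                                above, so the stack decides
 */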
/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct page *page;
	unsigned long offset;
	struct atl1_buffer *buffer_info, *next_info;
	struct sk_buff *skb;
	u16 num_alloc = 0;
	u16 rfd_next_to_use, next_next;
	struct rx_free_desc *rfd_desc;

	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
	if (++next_next == rfd_ring->count)
		next_next = 0;
	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
	next_info = &rfd_ring->buffer_info[next_next];

	while (!buffer_info->alloced && !next_info->alloced) {
		if (buffer_info->skb) {
			buffer_info->alloced = 1;
			goto next;
		}

		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

		skb = netdev_alloc_skb_ip_align(adapter->netdev,
						adapter->rx_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->netdev->stats.rx_dropped++;
			break;
		}

		buffer_info->alloced = 1;
		buffer_info->skb = skb;
		buffer_info->length = (u16) adapter->rx_buffer_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(pdev, page, offset,
						adapter->rx_buffer_len,
						PCI_DMA_FROMDEVICE);
		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
		rfd_desc->coalese = 0;

next:
		rfd_next_to_use = next_next;
		if (unlikely(++next_next == rfd_ring->count))
			next_next = 0;

		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
		next_info = &rfd_ring->buffer_info[next_next];
		num_alloc++;
	}

	if (num_alloc) {
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
	}
	return num_alloc;
}
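/*
 * Note (illustrative, added in editing): receive completion uses two
 * rings.  The driver produces empty buffers on the RFD ring and the
 * NIC returns filled packets on the RRD ring, consumed by
 * atl1_intr_rx() below.  The RRD also carries the 802.1Q tag with its
 * nibbles rotated relative to the host layout: the hardware keeps
 * {VID[11:0], CFI, PCP[2:0]} while the host uses
 * {PCP[2:0], CFI, VID[11:0]}.  For example, a host tag of 0x2005
 * (PCP 1, VID 5) appears in the RRD as 0x0051, which the shift/mask
 * expression in atl1_intr_rx() converts back to 0x2005.
 */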
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;
			else if (netif_msg_rx_err(adapter)) {
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"unexpected RRD buffer count\n");
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx_buf_len = %d\n",
					adapter->rx_buffer_len);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD num_buf = %d\n", rrd->num_buf);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_len = %d\n",
					rrd->xsz.xsum_sz.pkt_size);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_flg = 0x%08X\n", rrd->pkt_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD err_flg = 0x%08X\n", rrd->err_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD vlan_tag = 0x%08X\n",
					rrd->vlan_tag);
			}

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"bad RRD\n");
			/* see if we need to update the RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {
			/* current rrd has not been updated by hardware yet */
			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM |
				ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			buffer_info->length, PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
				((rrd->vlan_tag & 7) << 13) |
				((rrd->vlan_tag & 8) << 9);
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update the mailbox if we processed any descriptors */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
			atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}

static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		struct tx_packet_desc *tpd;

		tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev) &&
	    netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}

static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);

	return (next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1;
}
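/*
 * Note (illustrative, added in editing): atl1_tpd_avail() reports at
 * most count - 1 usable descriptors, deliberately sacrificing one
 * slot so that next_to_use == next_to_clean always means "empty",
 * never "full".  E.g. with count = 256, clean = 10 and use = 10 the
 * helper returns 255 (empty ring); with clean = 10 and use = 9 it
 * returns 0 (ring full).
 */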
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 hdr_len, ip_off;
	u32 real_len;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return -1;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			real_len = (((unsigned char *)iph - skb->data) +
				ntohs(iph->tot_len));
			if (real_len < skb->len)
				pskb_trim(skb, real_len);
			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
			if (skb->len == hdr_len) {
				iph->check = 0;
				tcp_hdr(skb)->check =
					~csum_tcpudp_magic(iph->saddr,
						iph->daddr, tcp_hdrlen(skb),
						IPPROTO_TCP, 0);
				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
					TPD_IPHL_SHIFT;
				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
					TPD_TCPHDRLEN_MASK) <<
					TPD_TCPHDRLEN_SHIFT;
				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
				return 1;
			}

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
				iph->daddr, 0, IPPROTO_TCP, 0);
			ip_off = (unsigned char *)iph -
				(unsigned char *)skb_network_header(skb);
			if (ip_off == 8) /* 802.3-SNAP frame */
				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
			else if (ip_off != 0)
				return -2;

			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
				TPD_IPHL_SHIFT;
			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
				TPD_MSS_MASK) << TPD_MSS_SHIFT;
			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
			return 3;
		}
	}
	return false;
}

static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		css = (u8) (skb->csum_start - skb_headroom(skb));
		cso = css + (u8) skb->csum_offset;
		if (unlikely(css & 0x1)) {
			/* L1 hardware requires an even number here */
			if (netif_msg_tx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"payload offset not an even number\n");
			return -1;
		}
		ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
			TPD_PLOADOFFSET_SHIFT;
		ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
			TPD_CCSUMOFFSET_SHIFT;
		ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
		return true;
	}
	return 0;
}
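/*
 * Note (illustrative, added in editing): atl1_tx_map() below slices
 * the packet into hardware buffers of at most ATL1_MAX_TX_BUF_LEN
 * bytes, so a span of data_len bytes consumes
 * nseg = DIV_ROUND_UP(data_len, ATL1_MAX_TX_BUF_LEN) descriptors.
 * For TSO the headers get a descriptor of their own, then the
 * remaining linear data and each page fragment are chopped the same
 * way.  Only the last descriptor of the packet carries the skb
 * pointer, so completion in atl1_intr_tx() frees the skb exactly
 * once.
 */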
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 buf_len = skb->len;
	struct page *page;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	int retval;
	u16 next_to_use;
	u16 data_len;
	u8 hdr_len;

	buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[next_to_use];
	BUG_ON(buffer_info->skb);
	/* put skb in last TPD */
	buffer_info->skb = NULL;

	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
	if (retval) {
		/* TSO */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, hdr_len, PCI_DMA_TODEVICE);

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;

		if (buf_len > hdr_len) {
			int i, nseg;

			data_len = buf_len - hdr_len;
			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < nseg; i++) {
				buffer_info =
					&tpd_ring->buffer_info[next_to_use];
				buffer_info->skb = NULL;
				/* cap each segment at ATL1_MAX_TX_BUF_LEN */
				buffer_info->length =
					(data_len > ATL1_MAX_TX_BUF_LEN) ?
					ATL1_MAX_TX_BUF_LEN : data_len;
				data_len -= buffer_info->length;
				page = virt_to_page(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
				offset = (unsigned long)(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
					~PAGE_MASK;
				buffer_info->dma = pci_map_page(adapter->pdev,
					page, offset, buffer_info->length,
					PCI_DMA_TODEVICE);
				if (++next_to_use == tpd_ring->count)
					next_to_use = 0;
			}
		}
	} else {
		/* not TSO */
		buffer_info->length = buf_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, buf_len, PCI_DMA_TODEVICE);
		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;
		u16 i, nseg;

		frag = &skb_shinfo(skb)->frags[f];
		buf_len = frag->size;

		nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
			ATL1_MAX_TX_BUF_LEN;
		for (i = 0; i < nseg; i++) {
			buffer_info = &tpd_ring->buffer_info[next_to_use];
			BUG_ON(buffer_info->skb);

			buffer_info->skb = NULL;
			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
				ATL1_MAX_TX_BUF_LEN : buf_len;
			buf_len -= buffer_info->length;
			buffer_info->dma = pci_map_page(adapter->pdev,
				frag->page,
				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
				buffer_info->length, PCI_DMA_TODEVICE);

			if (++next_to_use == tpd_ring->count)
				next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}

static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 j;
	u32 val;
	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
		if (tpd != ptpd)
			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
		tpd->word2 |= (cpu_to_le16(buffer_info->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

		/*
		 * if this is the first descriptor of a TSO chain, set
		 * TPD_HDRFLAG; otherwise clear it.
		 */
		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
			TPD_SEGMENT_EN_MASK;
		if (val) {
			if (!j)
				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
			else
				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
		}

		if (j == (count - 1))
			tpd->word3 |= 1 << TPD_EOP_SHIFT;

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, next_to_use);
}
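/*
 * Note (added in editing): atl1_xmit_frame() below pre-computes how
 * many TPDs the packet will need -- one for the linear data, plus
 * DIV_ROUND_UP(frag_size, ATL1_MAX_TX_BUF_LEN) for every page
 * fragment, plus extra descriptors when TSO splits the headers from
 * the payload -- and stops the queue up front if atl1_tpd_avail()
 * cannot satisfy the count, so atl1_tx_map() never overruns the ring.
 */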
static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int len;
	int tso;
	int count = 1;
	int ret_val;
	struct tx_packet_desc *ptpd;
	u16 frag_size;
	u16 vlan_tag;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	unsigned int f;
	unsigned int proto_hdr_len;

	len = skb_headlen(skb);

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_shinfo(skb)->frags[f].size;
		if (frag_size)
			count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		if (skb->protocol == htons(ETH_P_IP)) {
			proto_hdr_len = (skb_transport_offset(skb) +
				tcp_hdrlen(skb));
			if (unlikely(proto_hdr_len > len)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			/* do we need an additional TPD? */
			if (proto_hdr_len != len)
				count += (len - proto_hdr_len +
					ATL1_MAX_TX_BUF_LEN - 1) /
					ATL1_MAX_TX_BUF_LEN;
		}
	}

	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
		/* not enough descriptors */
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(adapter))
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"tx busy\n");
		return NETDEV_TX_BUSY;
	}

	ptpd = ATL1_TPD_DESC(tpd_ring,
		(u16) atomic_read(&tpd_ring->next_to_use));
	memset(ptpd, 0, sizeof(struct tx_packet_desc));

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
		ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
			TPD_VLANTAG_SHIFT;
	}

	tso = atl1_tso(adapter, skb, ptpd);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!tso) {
		ret_val = atl1_tx_csum(adapter, skb, ptpd);
		if (ret_val < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	atl1_tx_map(adapter, skb, ptpd);
	atl1_tx_queue(adapter, count, ptpd);
	atl1_update_mailbox(adapter);
	mmiowb();
	return NETDEV_TX_OK;
}
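/*
 * Note (added in editing): the vlan_tag shuffle in atl1_xmit_frame()
 * above is the exact inverse of the receive-side conversion in
 * atl1_intr_rx(); see the note on the hardware tag layout ahead of
 * that function.
 */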
/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;
	int max_ints = 10;

	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atlx_clear_phy_int(adapter);

		/* clear ISR status, enable CMB DMA and disable interrupts */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY link is down */
		if (status & ISR_PHY_LINKDOWN) {
			if (netif_msg_intr(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {	/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->pcie_dma_to_rst_task);
				return IRQ_HANDLED;
			}
		}

		/* check for a DMA read/write error */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			if (netif_msg_intr(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"pcie DMA r/w error (status = 0x%x)\n",
					status);
			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
			schedule_work(&adapter->pcie_dma_to_rst_task);
			return IRQ_HANDLED;
		}

		/* link event */
		if (status & ISR_GPHY) {
			adapter->soft_stats.tx_carrier_errors++;
			atl1_check_for_link(adapter);
		}

		/* transmit event */
		if (status & ISR_CMB_TX)
			atl1_intr_tx(adapter);

		/* rx exception */
		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
				ISR_HOST_RRD_OV))
				if (netif_msg_intr(adapter))
					dev_printk(KERN_DEBUG,
						&adapter->pdev->dev,
						"rx exception, ISR = 0x%x\n",
						status);
			atl1_intr_rx(adapter);
		}

		if (--max_ints < 0)
			break;

	} while ((status = adapter->cmb.cmb->int_stats));

	/* re-enable interrupts */
	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
	return IRQ_HANDLED;
}
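/*
 * Note (added in editing): the handler re-reads the CMB status word
 * and loops while new events keep arriving, but bounds itself at
 * max_ints (10) passes so a flood of events cannot pin the CPU in
 * interrupt context indefinitely; anything left pending simply raises
 * the next interrupt once the final REG_ISR write re-enables
 * interrupt delivery.
 */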
/*
 * atl1_phy_config - Timer Call-back
 * @data: pointer to the adapter structure cast to an unsigned long
 */
static void atl1_phy_config(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
	struct atl1_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	adapter->phy_timer_pending = false;
	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/*
 * Orphaned vendor comment left intact here:
 * <vendor comment>
 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
 * will assert. We do soft reset <0x1400=1> according
 * with the SPEC. BUT, it seemes that PCIE or DMA
 * state-machine will not be reset. DMAR_TO_INT will
 * assert again and again.
 * </vendor comment>
 */
static int atl1_reset(struct atl1_adapter *adapter)
{
	int ret;

	ret = atl1_reset_hw(&adapter->hw);
	if (ret)
		return ret;
	return atl1_init_hw(&adapter->hw);
}

static s32 atl1_up(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int irq_flags = IRQF_SAMPLE_RANDOM;

	/* hardware has been reset, we need to reload some things */
	atlx_set_multi(netdev);
	atl1_init_ring_ptrs(adapter);
	atlx_restore_vlan(adapter);
	/* atl1_alloc_rx_buffers() returns the number of buffers allocated */
	err = atl1_alloc_rx_buffers(adapter);
	if (unlikely(!err))
		/* no RX buffers were allocated */
		return -ENOMEM;

	if (unlikely(atl1_configure(adapter))) {
		err = -EIO;
		goto err_up;
	}

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		if (netif_msg_ifup(adapter))
			dev_info(&adapter->pdev->dev,
				"Unable to enable MSI: %d\n", err);
		irq_flags |= IRQF_SHARED;
	}

	err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
		netdev->name, netdev);
	if (unlikely(err))
		goto err_up;

	atlx_irq_enable(adapter);
	atl1_check_link(adapter);
	netif_start_queue(netdev);
	return 0;

err_up:
	pci_disable_msi(adapter->pdev);
	/* free rx_buffers */
	atl1_clean_rx_ring(adapter);
	return err;
}

static void atl1_down(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_stop_queue(netdev);
	del_timer_sync(&adapter->phy_config_timer);
	adapter->phy_timer_pending = false;

	atlx_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);
	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	netif_carrier_off(netdev);

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);
}

static void atl1_tx_timeout_task(struct work_struct *work)
{
	struct atl1_adapter *adapter =
		container_of(work, struct atl1_adapter, tx_timeout_task);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	atl1_down(adapter);
	atl1_up(adapter);
	netif_device_attach(netdev);
}

/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		if (netif_msg_link(adapter))
			dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
		return -EINVAL;
	}

	adapter->hw.max_frame_size = max_frame;
	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
	adapter->rx_buffer_len = (max_frame + 7) & ~7;
	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;

	netdev->mtu = new_mtu;
	if ((old_mtu != new_mtu) && netif_running(netdev)) {
		atl1_down(adapter);
		atl1_up(adapter);
	}

	return 0;
}
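/*
 * Note (illustrative, added in editing): for the default MTU of 1500
 * the arithmetic above yields max_frame = 1500 + 14 (Ethernet header)
 * + 4 (FCS) + 4 (VLAN tag) = 1522, rx_buffer_len = 1528 (max_frame
 * rounded up to a multiple of 8), and tx_jumbo_task_th =
 * rx_jumbo_th = 191, i.e. the frame size expressed in 8-byte units.
 */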
/*
 * atl1_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1_open(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int err;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = atl1_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl1_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	atl1_reset(adapter);
	return err;
}

/*
 * atl1_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1_close(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_down(adapter);
	atl1_free_ring_resources(adapter);
	return 0;
}
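/*
 * Note (added in editing): the suspend path below distinguishes three
 * cases.  With link up and wake-up events requested, it programs
 * magic-packet wake and leaves a minimal RX-only MAC configuration
 * running; with link down and link-change wake requested, it arms the
 * link-change wake bits instead; otherwise it clears REG_WOL_CTRL
 * entirely and the device cannot wake the system.
 */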
#ifdef CONFIG_PM
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u32 ctrl = 0;
	u32 wufc = adapter->wol;
	u32 val;
	int retval;
	u16 speed;
	u16 duplex;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		atl1_down(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	/* read BMSR twice: link status is latched low until read */
	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	val = ctrl & BMSR_LSTATUS;
	if (val)
		wufc &= ~ATLX_WUFC_LNKC;

	if (val && wufc) {
		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
		if (val) {
			if (netif_msg_ifdown(adapter))
				dev_printk(KERN_DEBUG, &pdev->dev,
					"error getting speed/duplex\n");
			goto disable_wol;
		}

		ctrl = 0;

		/* enable magic packet WOL */
		if (wufc & ATLX_WUFC_MAG)
			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
		ioread32(hw->hw_addr + REG_WOL_CTRL);

		/* configure the mac */
		ctrl = MAC_CTRL_RX_EN;
		ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
			MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
		if (duplex == FULL_DUPLEX)
			ctrl |= MAC_CTRL_DUPLX;
		ctrl |= (((u32)adapter->hw.preamble_len &
			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
		if (adapter->vlgrp)
			ctrl |= MAC_CTRL_RMV_VLAN;
		if (wufc & ATLX_WUFC_MAG)
			ctrl |= MAC_CTRL_BC_EN;
		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
		ioread32(hw->hw_addr + REG_MAC_CTRL);

		/* poke the PHY */
		ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);

		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto exit;
	}

	if (!val && wufc) {
		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
		ioread32(hw->hw_addr + REG_WOL_CTRL);
		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
		ioread32(hw->hw_addr + REG_MAC_CTRL);
		hw->phy_configured = false;
		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto exit;
	}

disable_wol:
	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
	ioread32(hw->hw_addr + REG_WOL_CTRL);
	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
	hw->phy_configured = false;
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
exit:
	if (netif_running(netdev))
		pci_disable_msi(adapter->pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int atl1_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		if (netif_msg_ifup(adapter))
			dev_printk(KERN_DEBUG, &pdev->dev,
				"error enabling pci device\n");
		return err;
	}

	pci_set_master(pdev);
	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	atl1_reset_hw(&adapter->hw);

	if (netif_running(netdev)) {
		adapter->cmb.cmb->int_stats = 0;
		atl1_up(adapter);
	}
	netif_device_attach(netdev);

	return 0;
}
#else
#define atl1_suspend NULL
#define atl1_resume NULL
#endif

static void atl1_shutdown(struct pci_dev *pdev)
{
#ifdef CONFIG_PM
	atl1_suspend(pdev, PMSG_SUSPEND);
#endif
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl1_poll_controller(struct net_device *netdev)
{
	disable_irq(netdev->irq);
	atl1_intr(netdev->irq, netdev);
	enable_irq(netdev->irq);
}
#endif

static const struct net_device_ops atl1_netdev_ops = {
	.ndo_open		= atl1_open,
	.ndo_stop		= atl1_close,
	.ndo_start_xmit		= atl1_xmit_frame,
	.ndo_set_multicast_list	= atlx_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= atl1_set_mac,
	.ndo_change_mtu		= atl1_change_mtu,
	.ndo_do_ioctl		= atlx_ioctl,
	.ndo_tx_timeout		= atlx_tx_timeout,
	.ndo_vlan_rx_register	= atlx_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= atl1_poll_controller,
#endif
};
/*
 * atl1_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit atl1_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1_adapter *adapter;
	static int cards_found = 0;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/*
	 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth. It is far easier to limit to 32-bit DMA than update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_dma;
	}
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1_driver_name
	 */
	err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
	if (err)
		goto err_request_regions;

	/*
	 * Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings
	 */
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct atl1_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);

	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_pci_iomap;
	}
	/* get device revision number */
	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
		(REG_MASTER_CTRL + 2));
	if (netif_msg_probe(adapter))
		dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);

	/* set default ring resource counts */
	adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
	adapter->tpd_ring.count = ATL1_DEFAULT_TPD;

	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = mdio_read;
	adapter->mii.mdio_write = mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = 0x1f;

	netdev->netdev_ops = &atl1_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;

	netdev->ethtool_ops = &atl1_ethtool_ops;
	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = atl1_sw_init(adapter);
	if (err)
		goto err_common;

	netdev->features = NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SG;
	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	/*
	 * patch for some L1 chips of early revision;
	 * the final version of the L1 may not need it
	 */
	/* atl1_pcie_patch(adapter); */

	/* really reset GPHY core */
	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);

	/*
	 * reset the controller to
	 * put the device in a known good starting state
	 */
	if (atl1_reset_hw(&adapter->hw)) {
		err = -EIO;
		goto err_common;
	}

	/* copy the MAC address out of the EEPROM */
	atl1_read_mac_addr(&adapter->hw);
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_common;
	}

	atl1_check_options(adapter);

	/* pre-init the MAC, and setup link */
	err = atl1_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_common;
	}

	atl1_pcie_patch(adapter);
	/* assume we have no link for now */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	setup_timer(&adapter->phy_config_timer, &atl1_phy_config,
		(unsigned long)adapter);
	adapter->phy_timer_pending = false;

	INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);

	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

	INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);

	err = register_netdev(netdev);
	if (err)
		goto err_common;

	cards_found++;
	atl1_via_workaround(adapter);
	return 0;

err_common:
	pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_dma:
err_request_regions:
	pci_disable_device(pdev);
	return err;
}
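/*
 * Note (added in editing): the error labels at the end of atl1_probe()
 * form the usual inverted-acquisition ladder: each label falls through
 * to the cleanup for everything acquired before the failing step, so
 * a failure at any point releases exactly the resources taken so far
 * and nothing more.
 */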
/*
 * atl1_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter;

	/* Device not available. Return. */
	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	/*
	 * Some atl1 boards lack persistent storage for their MAC, and get it
	 * from the BIOS during POST. If we've been messing with the MAC
	 * address, we need to save the permanent one.
	 */
	if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
			ETH_ALEN);
		atl1_set_mac_addr(&adapter->hw);
	}

	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
	unregister_netdev(netdev);
	pci_iounmap(pdev, adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static struct pci_driver atl1_driver = {
	.name = ATLX_DRIVER_NAME,
	.id_table = atl1_pci_tbl,
	.probe = atl1_probe,
	.remove = __devexit_p(atl1_remove),
	.suspend = atl1_suspend,
	.resume = atl1_resume,
	.shutdown = atl1_shutdown
};
/*
 * atl1_exit_module - Driver Exit Cleanup Routine
 *
 * atl1_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit atl1_exit_module(void)
{
	pci_unregister_driver(&atl1_driver);
}

/*
 * atl1_init_module - Driver Registration Routine
 *
 * atl1_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl1_init_module(void)
{
	return pci_register_driver(&atl1_driver);
}

module_init(atl1_init_module);
module_exit(atl1_exit_module);

struct atl1_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ATL1_STAT(m) \
	sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)

static struct atl1_stats atl1_gstrings_stats[] = {
	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
	{"multicast", ATL1_STAT(soft_stats.multicast)},
	{"collisions", ATL1_STAT(soft_stats.collisions)},
	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
	{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};
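/*
 * Note (illustrative, added in editing): ATL1_STAT(m) expands to the
 * *two* trailing initializers of an atl1_stats entry, e.g.
 *
 *	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)}
 *
 * becomes
 *
 *	{"rx_packets",
 *	 sizeof(((struct atl1_adapter *)0)->soft_stats.rx_packets),
 *	 offsetof(struct atl1_adapter, soft_stats.rx_packets)}
 *
 * so atl1_get_ethtool_stats() can fetch each counter generically from
 * its size and byte offset within the adapter structure.
 */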
static void atl1_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int i;
	char *p;

	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
		p = (char *)adapter + atl1_gstrings_stats[i].stat_offset;
		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}

static int atl1_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(atl1_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int atl1_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	ecmd->supported = (SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half |
		SUPPORTED_100baseT_Full |
		SUPPORTED_1000baseT_Full |
		SUPPORTED_Autoneg | SUPPORTED_TP);
	ecmd->advertising = ADVERTISED_TP;
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR)
			ecmd->advertising |=
				(ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
		else
			ecmd->advertising |= (ADVERTISED_1000baseT_Full);
	}
	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	ecmd->transceiver = XCVR_INTERNAL;

	if (netif_carrier_ok(adapter->netdev)) {
		u16 link_speed, link_duplex;

		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
		ecmd->speed = link_speed;
		if (link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
static int atl1_set_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u16 phy_data;
	int ret_val = 0;
	u16 old_media_type = hw->media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool shutting down adapter\n");
		atl1_down(adapter);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE)
		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
	else {
		if (ecmd->speed == SPEED_1000) {
			if (ecmd->duplex != DUPLEX_FULL) {
				if (netif_msg_link(adapter))
					dev_warn(&adapter->pdev->dev,
						"1000M half is invalid\n");
				ret_val = -EINVAL;
				goto exit_sset;
			}
			hw->media_type = MEDIA_TYPE_1000M_FULL;
		} else if (ecmd->speed == SPEED_100) {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_100M_FULL;
			else
				hw->media_type = MEDIA_TYPE_100M_HALF;
		} else {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_10M_FULL;
			else
				hw->media_type = MEDIA_TYPE_10M_HALF;
		}
	}
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		ecmd->advertising =
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	case MEDIA_TYPE_1000M_FULL:
		ecmd->advertising =
			ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	default:
		ecmd->advertising = 0;
		break;
	}
	if (atl1_phy_setup_autoneg_adv(hw)) {
		ret_val = -EINVAL;
		if (netif_msg_link(adapter))
			dev_warn(&adapter->pdev->dev,
				"invalid ethtool speed/duplex setting\n");
		goto exit_sset;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
				MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}
	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
exit_sset:
	if (ret_val)
		hw->media_type = old_media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool starting adapter\n");
		atl1_up(adapter);
	} else if (!ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool resetting adapter\n");
		atl1_reset(adapter);
	}
	return ret_val;
}

static void atl1_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}

static void atl1_get_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = 0;
	if (adapter->wol & ATLX_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int atl1_set_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
			WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;
	adapter->wol = 0;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= ATLX_WUFC_MAG;
	return 0;
}

static u32 atl1_get_msglevel(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void atl1_set_msglevel(struct net_device *netdev, u32 value)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = value;
}

static int atl1_get_regs_len(struct net_device *netdev)
{
	return ATL1_REG_COUNT * sizeof(u32);
}
static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
	void *p)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	unsigned int i;
	u32 *regbuf = p;

	for (i = 0; i < ATL1_REG_COUNT; i++) {
		/*
		 * This switch statement avoids reserved regions
		 * of register space.
		 */
		switch (i) {
		case 6 ... 9:
		case 14:
		case 29 ... 31:
		case 34 ... 63:
		case 75 ... 127:
		case 136 ... 1023:
		case 1027 ... 1087:
		case 1091 ... 1151:
		case 1194 ... 1195:
		case 1200 ... 1201:
		case 1206 ... 1213:
		case 1216 ... 1279:
		case 1290 ... 1311:
		case 1323 ... 1343:
		case 1358 ... 1359:
		case 1368 ... 1375:
		case 1378 ... 1383:
		case 1388 ... 1391:
		case 1393 ... 1395:
		case 1402 ... 1403:
		case 1410 ... 1471:
		case 1522 ... 1535:
			/* reserved region; don't read it */
			regbuf[i] = 0;
			break;
		default:
			/* unreserved region */
			regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
		}
	}
}

static void atl1_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
	struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;

	ring->rx_max_pending = ATL1_MAX_RFD;
	ring->tx_max_pending = ATL1_MAX_TPD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rxdr->count;
	ring->tx_pending = txdr->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
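/*
 * Note (illustrative, added in editing): atl1_set_ringparam() below
 * clamps each requested ring size into [min, max] and then rounds it
 * up to a multiple of four descriptors with (count + 3) & ~3 -- a
 * request for 1027 RX descriptors, for example, becomes 1028 --
 * presumably to satisfy a hardware alignment requirement.  The RRD
 * ring is always sized to match the RFD ring.
 */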
static int atl1_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;

	struct atl1_tpd_ring tpd_old, tpd_new;
	struct atl1_rfd_ring rfd_old, rfd_new;
	struct atl1_rrd_ring rrd_old, rrd_new;
	struct atl1_ring_header rhdr_old, rhdr_new;
	struct atl1_smb smb;
	struct atl1_cmb cmb;
	int err;

	tpd_old = adapter->tpd_ring;
	rfd_old = adapter->rfd_ring;
	rrd_old = adapter->rrd_ring;
	rhdr_old = adapter->ring_header;

	if (netif_running(adapter->netdev))
		atl1_down(adapter);

	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : rfdr->count;
	rfdr->count = (rfdr->count + 3) & ~3;
	rrdr->count = rfdr->count;

	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : tpdr->count;
	tpdr->count = (tpdr->count + 3) & ~3;

	if (netif_running(adapter->netdev)) {
		/* try to get new resources before deleting old */
		err = atl1_setup_ring_resources(adapter);
		if (err)
			goto err_setup_ring;

		/*
		 * save the new, restore the old in order to free it,
		 * then restore the new back again
		 */
		rfd_new = adapter->rfd_ring;
		rrd_new = adapter->rrd_ring;
		tpd_new = adapter->tpd_ring;
		rhdr_new = adapter->ring_header;
		adapter->rfd_ring = rfd_old;
		adapter->rrd_ring = rrd_old;
		adapter->tpd_ring = tpd_old;
		adapter->ring_header = rhdr_old;
		/*
		 * Save SMB and CMB, since atl1_free_ring_resources
		 * will clear them.
		 */
		smb = adapter->smb;
		cmb = adapter->cmb;
		atl1_free_ring_resources(adapter);
		adapter->rfd_ring = rfd_new;
		adapter->rrd_ring = rrd_new;
		adapter->tpd_ring = tpd_new;
		adapter->ring_header = rhdr_new;
		adapter->smb = smb;
		adapter->cmb = cmb;

		err = atl1_up(adapter);
		if (err)
			return err;
	}
	return 0;

err_setup_ring:
	adapter->rfd_ring = rfd_old;
	adapter->rrd_ring = rrd_old;
	adapter->tpd_ring = tpd_old;
	adapter->ring_header = rhdr_old;
	atl1_up(adapter);
	return err;
}

static void atl1_get_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		epause->autoneg = AUTONEG_ENABLE;
	else
		epause->autoneg = AUTONEG_DISABLE;
	epause->rx_pause = 1;
	epause->tx_pause = 1;
}

static int atl1_set_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		epause->autoneg = AUTONEG_ENABLE;
	else
		epause->autoneg = AUTONEG_DISABLE;

	epause->rx_pause = 1;
	epause->tx_pause = 1;

	return 0;
}
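/*
 * Note (added in editing): atl1_set_pauseparam() never touches the
 * hardware; like atl1_get_pauseparam() it only rewrites the request
 * structure to reflect the current state, so flow-control settings
 * are effectively read-only through this interface.
 */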
static u32 atl1_get_rx_csum(struct net_device *netdev)
{
	return 1;
}

static void atl1_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
			memcpy(p, atl1_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int atl1_nway_reset(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (netif_running(netdev)) {
		u16 phy_data;

		atl1_down(adapter);

		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
		    hw->media_type == MEDIA_TYPE_1000M_FULL) {
			phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
		} else {
			switch (hw->media_type) {
			case MEDIA_TYPE_100M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_100M_HALF:
				phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_10M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_10 | MII_CR_RESET;
				break;
			default:
				/* MEDIA_TYPE_10M_HALF */
				phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			}
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		atl1_up(adapter);
	}
	return 0;
}

const struct ethtool_ops atl1_ethtool_ops = {
	.get_settings		= atl1_get_settings,
	.set_settings		= atl1_set_settings,
	.get_drvinfo		= atl1_get_drvinfo,
	.get_wol		= atl1_get_wol,
	.set_wol		= atl1_set_wol,
	.get_msglevel		= atl1_get_msglevel,
	.set_msglevel		= atl1_set_msglevel,
	.get_regs_len		= atl1_get_regs_len,
	.get_regs		= atl1_get_regs,
	.get_ringparam		= atl1_get_ringparam,
	.set_ringparam		= atl1_set_ringparam,
	.get_pauseparam		= atl1_get_pauseparam,
	.set_pauseparam		= atl1_set_pauseparam,
	.get_rx_csum		= atl1_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.get_link		= ethtool_op_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= atl1_get_strings,
	.nway_reset		= atl1_nway_reset,
	.get_ethtool_stats	= atl1_get_ethtool_stats,
	.get_sset_count		= atl1_get_sset_count,
	.set_tso		= ethtool_op_set_tso,
};