1/* 2 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter 3 * 4 * Copyright (C) 2003, 2007 IC Plus Corp 5 * 6 * Original Author: 7 * 8 * Craig Rich 9 * Sundance Technology, Inc. 10 * www.sundanceti.com 11 * craig_rich@sundanceti.com 12 * 13 * Current Maintainer: 14 * 15 * Sorbica Shieh. 16 * http://www.icplus.com.tw 17 * sorbica@icplus.com.tw 18 * 19 * Jesse Huang 20 * http://www.icplus.com.tw 21 * jesse@icplus.com.tw 22 */ 23#include <linux/crc32.h> 24#include <linux/ethtool.h> 25#include <linux/gfp.h> 26#include <linux/mii.h> 27#include <linux/mutex.h> 28 29#include <asm/div64.h> 30 31#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) 32#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) 33#define IPG_RESET_MASK \ 34 (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ 35 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ 36 IPG_AC_AUTO_INIT) 37 38#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg)) 39#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg)) 40#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg)) 41 42#define ipg_r32(reg) ioread32(ioaddr + (reg)) 43#define ipg_r16(reg) ioread16(ioaddr + (reg)) 44#define ipg_r8(reg) ioread8(ioaddr + (reg)) 45 46enum { 47 netdev_io_size = 128 48}; 49 50#include "ipg.h" 51#define DRV_NAME "ipg" 52 53MODULE_AUTHOR("IC Plus Corp. 2003"); 54MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); 55MODULE_LICENSE("GPL"); 56 57/* 58 * Defaults 59 */ 60#define IPG_MAX_RXFRAME_SIZE 0x0600 61#define IPG_RXFRAG_SIZE 0x0600 62#define IPG_RXSUPPORT_SIZE 0x0600 63#define IPG_IS_JUMBO false 64 65/* 66 * Variable record -- index by leading revision/length 67 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN 68 */ 69static unsigned short DefaultPhyParam[] = { 70 /* 11/12/03 IP1000A v1-3 rev=0x40 */ 71 /*-------------------------------------------------------------------------- 72 (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, 73 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, 74 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, 75 --------------------------------------------------------------------------*/ 76 /* 12/17/03 IP1000A v1-4 rev=0x40 */ 77 (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, 78 0x0000, 79 30, 0x005e, 9, 0x0700, 80 /* 01/09/04 IP1000A v1-5 rev=0x41 */ 81 (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, 82 0x0000, 83 30, 0x005e, 9, 0x0700, 84 0x0000 85}; 86 87static const char *ipg_brand_name[] = { 88 "IC PLUS IP1000 1000/100/10 based NIC", 89 "Sundance Technology ST2021 based NIC", 90 "Tamarack Microelectronics TC9020/9021 based NIC", 91 "Tamarack Microelectronics TC9020/9021 based NIC", 92 "D-Link NIC IP1000A" 93}; 94 95static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { 96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 99 { PCI_VDEVICE(DLINK, 0x9021), 3 }, 100 { PCI_VDEVICE(DLINK, 0x4020), 4 }, 101 { 0, } 102}; 103 104MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); 105 106static inline void __iomem *ipg_ioaddr(struct net_device *dev) 107{ 108 struct ipg_nic_private *sp = netdev_priv(dev); 109 return sp->ioaddr; 110} 111 112#ifdef IPG_DEBUG 113static void ipg_dump_rfdlist(struct net_device *dev) 114{ 115 struct ipg_nic_private *sp = netdev_priv(dev); 116 void __iomem *ioaddr = sp->ioaddr; 117 unsigned int i; 118 u32 offset; 119 120 IPG_DEBUG_MSG("_dump_rfdlist\n"); 121 122 printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current); 123 printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty); 124 printk(KERN_INFO "RFDList start address = %16.16lx\n", 125 (unsigned long) sp->rxd_map); 126 printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n", 127 ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); 128 129 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { 130 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; 131 printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i, 132 offset, (unsigned long) sp->rxd[i].next_desc); 133 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; 134 printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i, 135 offset, (unsigned long) sp->rxd[i].rfs); 136 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; 137 printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, 138 offset, (unsigned long) sp->rxd[i].frag_info); 139 } 140} 141 142static void ipg_dump_tfdlist(struct net_device *dev) 143{ 144 struct ipg_nic_private *sp = netdev_priv(dev); 145 void __iomem *ioaddr = sp->ioaddr; 146 unsigned int i; 147 u32 offset; 148 149 IPG_DEBUG_MSG("_dump_tfdlist\n"); 150 151 printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current); 152 printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty); 153 printk(KERN_INFO "TFDList start address = %16.16lx\n", 154 (unsigned long) sp->txd_map); 155 printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n", 156 ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); 157 158 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 159 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; 160 printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i, 161 offset, (unsigned long) sp->txd[i].next_desc); 162 163 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; 164 printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i, 165 offset, (unsigned long) sp->txd[i].tfc); 166 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; 167 printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, 168 offset, (unsigned long) sp->txd[i].frag_info); 169 } 170} 171#endif 172 173static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) 174{ 175 ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); 176 ndelay(IPG_PC_PHYCTRLWAIT_NS); 177} 178 179static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) 180{ 181 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); 182 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); 183} 184 185static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) 186{ 187 phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; 188 189 ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); 190} 191 192static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) 193{ 194 ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | 195 phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); 196} 197 198static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity) 199{ 200 u16 bit_data; 201 202 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); 203 204 bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; 205 206 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); 207 208 return bit_data; 209} 210 211/* 212 * Read a register from the Physical Layer device located 213 * on the IPG NIC, using the IPG PHYCTRL register. 214 */ 215static int mdio_read(struct net_device *dev, int phy_id, int phy_reg) 216{ 217 void __iomem *ioaddr = ipg_ioaddr(dev); 218 /* 219 * The GMII mangement frame structure for a read is as follows: 220 * 221 * |Preamble|st|op|phyad|regad|ta| data |idle| 222 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | 223 * 224 * <32 1s> = 32 consecutive logic 1 values 225 * A = bit of Physical Layer device address (MSB first) 226 * R = bit of register address (MSB first) 227 * z = High impedance state 228 * D = bit of read data (MSB first) 229 * 230 * Transmission order is 'Preamble' field first, bits transmitted 231 * left to right (first to last). 232 */ 233 struct { 234 u32 field; 235 unsigned int len; 236 } p[] = { 237 { GMII_PREAMBLE, 32 }, /* Preamble */ 238 { GMII_ST, 2 }, /* ST */ 239 { GMII_READ, 2 }, /* OP */ 240 { phy_id, 5 }, /* PHYAD */ 241 { phy_reg, 5 }, /* REGAD */ 242 { 0x0000, 2 }, /* TA */ 243 { 0x0000, 16 }, /* DATA */ 244 { 0x0000, 1 } /* IDLE */ 245 }; 246 unsigned int i, j; 247 u8 polarity, data; 248 249 polarity = ipg_r8(PHY_CTRL); 250 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); 251 252 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ 253 for (j = 0; j < 5; j++) { 254 for (i = 0; i < p[j].len; i++) { 255 /* For each variable length field, the MSB must be 256 * transmitted first. Rotate through the field bits, 257 * starting with the MSB, and move each bit into the 258 * the 1st (2^1) bit position (this is the bit position 259 * corresponding to the MgmtData bit of the PhyCtrl 260 * register for the IPG). 261 * 262 * Example: ST = 01; 263 * 264 * First write a '0' to bit 1 of the PhyCtrl 265 * register, then write a '1' to bit 1 of the 266 * PhyCtrl register. 267 * 268 * To do this, right shift the MSB of ST by the value: 269 * [field length - 1 - #ST bits already written] 270 * then left shift this result by 1. 271 */ 272 data = (p[j].field >> (p[j].len - 1 - i)) << 1; 273 data &= IPG_PC_MGMTDATA; 274 data |= polarity | IPG_PC_MGMTDIR; 275 276 ipg_drive_phy_ctl_low_high(ioaddr, data); 277 } 278 } 279 280 send_three_state(ioaddr, polarity); 281 282 read_phy_bit(ioaddr, polarity); 283 284 /* 285 * For a read cycle, the bits for the next two fields (TA and 286 * DATA) are driven by the PHY (the IPG reads these bits). 287 */ 288 for (i = 0; i < p[6].len; i++) { 289 p[6].field |= 290 (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); 291 } 292 293 send_three_state(ioaddr, polarity); 294 send_three_state(ioaddr, polarity); 295 send_three_state(ioaddr, polarity); 296 send_end(ioaddr, polarity); 297 298 /* Return the value of the DATA field. */ 299 return p[6].field; 300} 301 302/* 303 * Write to a register from the Physical Layer device located 304 * on the IPG NIC, using the IPG PHYCTRL register. 305 */ 306static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) 307{ 308 void __iomem *ioaddr = ipg_ioaddr(dev); 309 /* 310 * The GMII mangement frame structure for a read is as follows: 311 * 312 * |Preamble|st|op|phyad|regad|ta| data |idle| 313 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | 314 * 315 * <32 1s> = 32 consecutive logic 1 values 316 * A = bit of Physical Layer device address (MSB first) 317 * R = bit of register address (MSB first) 318 * z = High impedance state 319 * D = bit of write data (MSB first) 320 * 321 * Transmission order is 'Preamble' field first, bits transmitted 322 * left to right (first to last). 323 */ 324 struct { 325 u32 field; 326 unsigned int len; 327 } p[] = { 328 { GMII_PREAMBLE, 32 }, /* Preamble */ 329 { GMII_ST, 2 }, /* ST */ 330 { GMII_WRITE, 2 }, /* OP */ 331 { phy_id, 5 }, /* PHYAD */ 332 { phy_reg, 5 }, /* REGAD */ 333 { 0x0002, 2 }, /* TA */ 334 { val & 0xffff, 16 }, /* DATA */ 335 { 0x0000, 1 } /* IDLE */ 336 }; 337 unsigned int i, j; 338 u8 polarity, data; 339 340 polarity = ipg_r8(PHY_CTRL); 341 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); 342 343 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ 344 for (j = 0; j < 7; j++) { 345 for (i = 0; i < p[j].len; i++) { 346 /* For each variable length field, the MSB must be 347 * transmitted first. Rotate through the field bits, 348 * starting with the MSB, and move each bit into the 349 * the 1st (2^1) bit position (this is the bit position 350 * corresponding to the MgmtData bit of the PhyCtrl 351 * register for the IPG). 352 * 353 * Example: ST = 01; 354 * 355 * First write a '0' to bit 1 of the PhyCtrl 356 * register, then write a '1' to bit 1 of the 357 * PhyCtrl register. 358 * 359 * To do this, right shift the MSB of ST by the value: 360 * [field length - 1 - #ST bits already written] 361 * then left shift this result by 1. 362 */ 363 data = (p[j].field >> (p[j].len - 1 - i)) << 1; 364 data &= IPG_PC_MGMTDATA; 365 data |= polarity | IPG_PC_MGMTDIR; 366 367 ipg_drive_phy_ctl_low_high(ioaddr, data); 368 } 369 } 370 371 /* The last cycle is a tri-state, so read from the PHY. */ 372 for (j = 7; j < 8; j++) { 373 for (i = 0; i < p[j].len; i++) { 374 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); 375 376 p[j].field |= ((ipg_r8(PHY_CTRL) & 377 IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i); 378 379 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); 380 } 381 } 382} 383 384static void ipg_set_led_mode(struct net_device *dev) 385{ 386 struct ipg_nic_private *sp = netdev_priv(dev); 387 void __iomem *ioaddr = sp->ioaddr; 388 u32 mode; 389 390 mode = ipg_r32(ASIC_CTRL); 391 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); 392 393 if ((sp->led_mode & 0x03) > 1) 394 mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ 395 396 if ((sp->led_mode & 0x01) == 1) 397 mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ 398 399 if ((sp->led_mode & 0x08) == 8) 400 mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ 401 402 ipg_w32(mode, ASIC_CTRL); 403} 404 405static void ipg_set_phy_set(struct net_device *dev) 406{ 407 struct ipg_nic_private *sp = netdev_priv(dev); 408 void __iomem *ioaddr = sp->ioaddr; 409 int physet; 410 411 physet = ipg_r8(PHY_SET); 412 physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); 413 physet |= ((sp->led_mode & 0x70) >> 4); 414 ipg_w8(physet, PHY_SET); 415} 416 417static int ipg_reset(struct net_device *dev, u32 resetflags) 418{ 419 /* Assert functional resets via the IPG AsicCtrl 420 * register as specified by the 'resetflags' input 421 * parameter. 422 */ 423 void __iomem *ioaddr = ipg_ioaddr(dev); 424 unsigned int timeout_count = 0; 425 426 IPG_DEBUG_MSG("_reset\n"); 427 428 ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); 429 430 /* Delay added to account for problem with 10Mbps reset. */ 431 mdelay(IPG_AC_RESETWAIT); 432 433 while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { 434 mdelay(IPG_AC_RESETWAIT); 435 if (++timeout_count > IPG_AC_RESET_TIMEOUT) 436 return -ETIME; 437 } 438 /* Set LED Mode in Asic Control */ 439 ipg_set_led_mode(dev); 440 441 /* Set PHYSet Register Value */ 442 ipg_set_phy_set(dev); 443 return 0; 444} 445 446/* Find the GMII PHY address. */ 447static int ipg_find_phyaddr(struct net_device *dev) 448{ 449 unsigned int phyaddr, i; 450 451 for (i = 0; i < 32; i++) { 452 u32 status; 453 454 /* Search for the correct PHY address among 32 possible. */ 455 phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; 456 457 /* 10/22/03 Grace change verify from GMII_PHY_STATUS to 458 GMII_PHY_ID1 459 */ 460 461 status = mdio_read(dev, phyaddr, MII_BMSR); 462 463 if ((status != 0xFFFF) && (status != 0)) 464 return phyaddr; 465 } 466 467 return 0x1f; 468} 469 470/* 471 * Configure IPG based on result of IEEE 802.3 PHY 472 * auto-negotiation. 473 */ 474static int ipg_config_autoneg(struct net_device *dev) 475{ 476 struct ipg_nic_private *sp = netdev_priv(dev); 477 void __iomem *ioaddr = sp->ioaddr; 478 unsigned int txflowcontrol; 479 unsigned int rxflowcontrol; 480 unsigned int fullduplex; 481 u32 mac_ctrl_val; 482 u32 asicctrl; 483 u8 phyctrl; 484 485 IPG_DEBUG_MSG("_config_autoneg\n"); 486 487 asicctrl = ipg_r32(ASIC_CTRL); 488 phyctrl = ipg_r8(PHY_CTRL); 489 mac_ctrl_val = ipg_r32(MAC_CTRL); 490 491 /* Set flags for use in resolving auto-negotation, assuming 492 * non-1000Mbps, half duplex, no flow control. 493 */ 494 fullduplex = 0; 495 txflowcontrol = 0; 496 rxflowcontrol = 0; 497 498 /* To accomodate a problem in 10Mbps operation, 499 * set a global flag if PHY running in 10Mbps mode. 500 */ 501 sp->tenmbpsmode = 0; 502 503 printk(KERN_INFO "%s: Link speed = ", dev->name); 504 505 /* Determine actual speed of operation. */ 506 switch (phyctrl & IPG_PC_LINK_SPEED) { 507 case IPG_PC_LINK_SPEED_10MBPS: 508 printk("10Mbps.\n"); 509 printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n", 510 dev->name); 511 sp->tenmbpsmode = 1; 512 break; 513 case IPG_PC_LINK_SPEED_100MBPS: 514 printk("100Mbps.\n"); 515 break; 516 case IPG_PC_LINK_SPEED_1000MBPS: 517 printk("1000Mbps.\n"); 518 break; 519 default: 520 printk("undefined!\n"); 521 return 0; 522 } 523 524 if (phyctrl & IPG_PC_DUPLEX_STATUS) { 525 fullduplex = 1; 526 txflowcontrol = 1; 527 rxflowcontrol = 1; 528 } 529 530 /* Configure full duplex, and flow control. */ 531 if (fullduplex == 1) { 532 /* Configure IPG for full duplex operation. */ 533 printk(KERN_INFO "%s: setting full duplex, ", dev->name); 534 535 mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; 536 537 if (txflowcontrol == 1) { 538 printk("TX flow control"); 539 mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; 540 } else { 541 printk("no TX flow control"); 542 mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; 543 } 544 545 if (rxflowcontrol == 1) { 546 printk(", RX flow control."); 547 mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; 548 } else { 549 printk(", no RX flow control."); 550 mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; 551 } 552 553 printk("\n"); 554 } else { 555 /* Configure IPG for half duplex operation. */ 556 printk(KERN_INFO "%s: setting half duplex, " 557 "no TX flow control, no RX flow control.\n", dev->name); 558 559 mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD & 560 ~IPG_MC_TX_FLOW_CONTROL_ENABLE & 561 ~IPG_MC_RX_FLOW_CONTROL_ENABLE; 562 } 563 ipg_w32(mac_ctrl_val, MAC_CTRL); 564 return 0; 565} 566 567/* Determine and configure multicast operation and set 568 * receive mode for IPG. 569 */ 570static void ipg_nic_set_multicast_list(struct net_device *dev) 571{ 572 void __iomem *ioaddr = ipg_ioaddr(dev); 573 struct netdev_hw_addr *ha; 574 unsigned int hashindex; 575 u32 hashtable[2]; 576 u8 receivemode; 577 578 IPG_DEBUG_MSG("_nic_set_multicast_list\n"); 579 580 receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; 581 582 if (dev->flags & IFF_PROMISC) { 583 /* NIC to be configured in promiscuous mode. */ 584 receivemode = IPG_RM_RECEIVEALLFRAMES; 585 } else if ((dev->flags & IFF_ALLMULTI) || 586 ((dev->flags & IFF_MULTICAST) && 587 (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) { 588 /* NIC to be configured to receive all multicast 589 * frames. */ 590 receivemode |= IPG_RM_RECEIVEMULTICAST; 591 } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { 592 /* NIC to be configured to receive selected 593 * multicast addresses. */ 594 receivemode |= IPG_RM_RECEIVEMULTICASTHASH; 595 } 596 597 /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. 598 * The IPG applies a cyclic-redundancy-check (the same CRC 599 * used to calculate the frame data FCS) to the destination 600 * address all incoming multicast frames whose destination 601 * address has the multicast bit set. The least significant 602 * 6 bits of the CRC result are used as an addressing index 603 * into the hash table. If the value of the bit addressed by 604 * this index is a 1, the frame is passed to the host system. 605 */ 606 607 /* Clear hashtable. */ 608 hashtable[0] = 0x00000000; 609 hashtable[1] = 0x00000000; 610 611 /* Cycle through all multicast addresses to filter. */ 612 netdev_for_each_mc_addr(ha, dev) { 613 /* Calculate CRC result for each multicast address. */ 614 hashindex = crc32_le(0xffffffff, ha->addr, 615 ETH_ALEN); 616 617 /* Use only the least significant 6 bits. */ 618 hashindex = hashindex & 0x3F; 619 620 /* Within "hashtable", set bit number "hashindex" 621 * to a logic 1. 622 */ 623 set_bit(hashindex, (void *)hashtable); 624 } 625 626 /* Write the value of the hashtable, to the 4, 16 bit 627 * HASHTABLE IPG registers. 628 */ 629 ipg_w32(hashtable[0], HASHTABLE_0); 630 ipg_w32(hashtable[1], HASHTABLE_1); 631 632 ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); 633 634 IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); 635} 636 637static int ipg_io_config(struct net_device *dev) 638{ 639 struct ipg_nic_private *sp = netdev_priv(dev); 640 void __iomem *ioaddr = ipg_ioaddr(dev); 641 u32 origmacctrl; 642 u32 restoremacctrl; 643 644 IPG_DEBUG_MSG("_io_config\n"); 645 646 origmacctrl = ipg_r32(MAC_CTRL); 647 648 restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; 649 650 /* Based on compilation option, determine if FCS is to be 651 * stripped on receive frames by IPG. 652 */ 653 if (!IPG_STRIP_FCS_ON_RX) 654 restoremacctrl |= IPG_MC_RCV_FCS; 655 656 /* Determine if transmitter and/or receiver are 657 * enabled so we may restore MACCTRL correctly. 658 */ 659 if (origmacctrl & IPG_MC_TX_ENABLED) 660 restoremacctrl |= IPG_MC_TX_ENABLE; 661 662 if (origmacctrl & IPG_MC_RX_ENABLED) 663 restoremacctrl |= IPG_MC_RX_ENABLE; 664 665 /* Transmitter and receiver must be disabled before setting 666 * IFSSelect. 667 */ 668 ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & 669 IPG_MC_RSVD_MASK, MAC_CTRL); 670 671 /* Now that transmitter and receiver are disabled, write 672 * to IFSSelect. 673 */ 674 ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); 675 676 /* Set RECEIVEMODE register. */ 677 ipg_nic_set_multicast_list(dev); 678 679 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE); 680 681 ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); 682 ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); 683 ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); 684 ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); 685 ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); 686 ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); 687 ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | 688 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | 689 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | 690 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); 691 ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); 692 ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); 693 694 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); 695 696 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); 697 698 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); 699 700 /* Now restore MACCTRL to original setting. */ 701 ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); 702 703 /* Disable unused RMON statistics. */ 704 ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); 705 706 /* Disable unused MIB statistics. */ 707 ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | 708 IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | 709 IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | 710 IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | 711 IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | 712 IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); 713 714 return 0; 715} 716 717/* 718 * Create a receive buffer within system memory and update 719 * NIC private structure appropriately. 720 */ 721static int ipg_get_rxbuff(struct net_device *dev, int entry) 722{ 723 struct ipg_nic_private *sp = netdev_priv(dev); 724 struct ipg_rx *rxfd = sp->rxd + entry; 725 struct sk_buff *skb; 726 u64 rxfragsize; 727 728 IPG_DEBUG_MSG("_get_rxbuff\n"); 729 730 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); 731 if (!skb) { 732 sp->rx_buff[entry] = NULL; 733 return -ENOMEM; 734 } 735 736 /* Associate the receive buffer with the IPG NIC. */ 737 skb->dev = dev; 738 739 /* Save the address of the sk_buff structure. */ 740 sp->rx_buff[entry] = skb; 741 742 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, 743 sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 744 745 /* Set the RFD fragment length. */ 746 rxfragsize = sp->rxfrag_size; 747 rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); 748 749 return 0; 750} 751 752static int init_rfdlist(struct net_device *dev) 753{ 754 struct ipg_nic_private *sp = netdev_priv(dev); 755 void __iomem *ioaddr = sp->ioaddr; 756 unsigned int i; 757 758 IPG_DEBUG_MSG("_init_rfdlist\n"); 759 760 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { 761 struct ipg_rx *rxfd = sp->rxd + i; 762 763 if (sp->rx_buff[i]) { 764 pci_unmap_single(sp->pdev, 765 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 766 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 767 dev_kfree_skb_irq(sp->rx_buff[i]); 768 sp->rx_buff[i] = NULL; 769 } 770 771 /* Clear out the RFS field. */ 772 rxfd->rfs = 0x0000000000000000; 773 774 if (ipg_get_rxbuff(dev, i) < 0) { 775 /* 776 * A receive buffer was not ready, break the 777 * RFD list here. 778 */ 779 IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n"); 780 781 /* Just in case we cannot allocate a single RFD. 782 * Should not occur. 783 */ 784 if (i == 0) { 785 printk(KERN_ERR "%s: No memory available" 786 " for RFD list.\n", dev->name); 787 return -ENOMEM; 788 } 789 } 790 791 rxfd->next_desc = cpu_to_le64(sp->rxd_map + 792 sizeof(struct ipg_rx)*(i + 1)); 793 } 794 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); 795 796 sp->rx_current = 0; 797 sp->rx_dirty = 0; 798 799 /* Write the location of the RFDList to the IPG. */ 800 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); 801 ipg_w32(0x00000000, RFD_LIST_PTR_1); 802 803 return 0; 804} 805 806static void init_tfdlist(struct net_device *dev) 807{ 808 struct ipg_nic_private *sp = netdev_priv(dev); 809 void __iomem *ioaddr = sp->ioaddr; 810 unsigned int i; 811 812 IPG_DEBUG_MSG("_init_tfdlist\n"); 813 814 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 815 struct ipg_tx *txfd = sp->txd + i; 816 817 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); 818 819 if (sp->tx_buff[i]) { 820 dev_kfree_skb_irq(sp->tx_buff[i]); 821 sp->tx_buff[i] = NULL; 822 } 823 824 txfd->next_desc = cpu_to_le64(sp->txd_map + 825 sizeof(struct ipg_tx)*(i + 1)); 826 } 827 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); 828 829 sp->tx_current = 0; 830 sp->tx_dirty = 0; 831 832 /* Write the location of the TFDList to the IPG. */ 833 IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n", 834 (u32) sp->txd_map); 835 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); 836 ipg_w32(0x00000000, TFD_LIST_PTR_1); 837 838 sp->reset_current_tfd = 1; 839} 840 841/* 842 * Free all transmit buffers which have already been transfered 843 * via DMA to the IPG. 844 */ 845static void ipg_nic_txfree(struct net_device *dev) 846{ 847 struct ipg_nic_private *sp = netdev_priv(dev); 848 unsigned int released, pending, dirty; 849 850 IPG_DEBUG_MSG("_nic_txfree\n"); 851 852 pending = sp->tx_current - sp->tx_dirty; 853 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; 854 855 for (released = 0; released < pending; released++) { 856 struct sk_buff *skb = sp->tx_buff[dirty]; 857 struct ipg_tx *txfd = sp->txd + dirty; 858 859 IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc); 860 861 /* Look at each TFD's TFC field beginning 862 * at the last freed TFD up to the current TFD. 863 * If the TFDDone bit is set, free the associated 864 * buffer. 865 */ 866 if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE))) 867 break; 868 869 /* Free the transmit buffer. */ 870 if (skb) { 871 pci_unmap_single(sp->pdev, 872 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, 873 skb->len, PCI_DMA_TODEVICE); 874 875 dev_kfree_skb_irq(skb); 876 877 sp->tx_buff[dirty] = NULL; 878 } 879 dirty = (dirty + 1) % IPG_TFDLIST_LENGTH; 880 } 881 882 sp->tx_dirty += released; 883 884 if (netif_queue_stopped(dev) && 885 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { 886 netif_wake_queue(dev); 887 } 888} 889 890static void ipg_tx_timeout(struct net_device *dev) 891{ 892 struct ipg_nic_private *sp = netdev_priv(dev); 893 void __iomem *ioaddr = sp->ioaddr; 894 895 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | 896 IPG_AC_FIFO); 897 898 spin_lock_irq(&sp->lock); 899 900 /* Re-configure after DMA reset. */ 901 if (ipg_io_config(dev) < 0) { 902 printk(KERN_INFO "%s: Error during re-configuration.\n", 903 dev->name); 904 } 905 906 init_tfdlist(dev); 907 908 spin_unlock_irq(&sp->lock); 909 910 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, 911 MAC_CTRL); 912} 913 914/* 915 * For TxComplete interrupts, free all transmit 916 * buffers which have already been transfered via DMA 917 * to the IPG. 918 */ 919static void ipg_nic_txcleanup(struct net_device *dev) 920{ 921 struct ipg_nic_private *sp = netdev_priv(dev); 922 void __iomem *ioaddr = sp->ioaddr; 923 unsigned int i; 924 925 IPG_DEBUG_MSG("_nic_txcleanup\n"); 926 927 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 928 /* Reading the TXSTATUS register clears the 929 * TX_COMPLETE interrupt. 930 */ 931 u32 txstatusdword = ipg_r32(TX_STATUS); 932 933 IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword); 934 935 /* Check for Transmit errors. Error bits only valid if 936 * TX_COMPLETE bit in the TXSTATUS register is a 1. 937 */ 938 if (!(txstatusdword & IPG_TS_TX_COMPLETE)) 939 break; 940 941 /* If in 10Mbps mode, indicate transmit is ready. */ 942 if (sp->tenmbpsmode) { 943 netif_wake_queue(dev); 944 } 945 946 /* Transmit error, increment stat counters. */ 947 if (txstatusdword & IPG_TS_TX_ERROR) { 948 IPG_DEBUG_MSG("Transmit error.\n"); 949 sp->stats.tx_errors++; 950 } 951 952 /* Late collision, re-enable transmitter. */ 953 if (txstatusdword & IPG_TS_LATE_COLLISION) { 954 IPG_DEBUG_MSG("Late collision on transmit.\n"); 955 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & 956 IPG_MC_RSVD_MASK, MAC_CTRL); 957 } 958 959 /* Maximum collisions, re-enable transmitter. */ 960 if (txstatusdword & IPG_TS_TX_MAX_COLL) { 961 IPG_DEBUG_MSG("Maximum collisions on transmit.\n"); 962 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & 963 IPG_MC_RSVD_MASK, MAC_CTRL); 964 } 965 966 /* Transmit underrun, reset and re-enable 967 * transmitter. 968 */ 969 if (txstatusdword & IPG_TS_TX_UNDERRUN) { 970 IPG_DEBUG_MSG("Transmitter underrun.\n"); 971 sp->stats.tx_fifo_errors++; 972 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | 973 IPG_AC_NETWORK | IPG_AC_FIFO); 974 975 /* Re-configure after DMA reset. */ 976 if (ipg_io_config(dev) < 0) { 977 printk(KERN_INFO 978 "%s: Error during re-configuration.\n", 979 dev->name); 980 } 981 init_tfdlist(dev); 982 983 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & 984 IPG_MC_RSVD_MASK, MAC_CTRL); 985 } 986 } 987 988 ipg_nic_txfree(dev); 989} 990 991/* Provides statistical information about the IPG NIC. */ 992static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) 993{ 994 struct ipg_nic_private *sp = netdev_priv(dev); 995 void __iomem *ioaddr = sp->ioaddr; 996 u16 temp1; 997 u16 temp2; 998 999 IPG_DEBUG_MSG("_nic_get_stats\n"); 1000 1001 /* Check to see if the NIC has been initialized via nic_open, 1002 * before trying to read statistic registers. 1003 */ 1004 if (!test_bit(__LINK_STATE_START, &dev->state)) 1005 return &sp->stats; 1006 1007 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); 1008 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); 1009 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); 1010 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); 1011 temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); 1012 sp->stats.rx_errors += temp1; 1013 sp->stats.rx_missed_errors += temp1; 1014 temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + 1015 ipg_r32(IPG_LATECOLLISIONS); 1016 temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); 1017 sp->stats.collisions += temp1; 1018 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); 1019 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + 1020 ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; 1021 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); 1022 1023 /* detailed tx_errors */ 1024 sp->stats.tx_carrier_errors += temp2; 1025 1026 /* detailed rx_errors */ 1027 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + 1028 ipg_r16(IPG_FRAMETOOLONGERRRORS); 1029 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); 1030 1031 /* Unutilized IPG statistic registers. */ 1032 ipg_r32(IPG_MCSTFRAMESRCVDOK); 1033 1034 return &sp->stats; 1035} 1036 1037/* Restore used receive buffers. */ 1038static int ipg_nic_rxrestore(struct net_device *dev) 1039{ 1040 struct ipg_nic_private *sp = netdev_priv(dev); 1041 const unsigned int curr = sp->rx_current; 1042 unsigned int dirty = sp->rx_dirty; 1043 1044 IPG_DEBUG_MSG("_nic_rxrestore\n"); 1045 1046 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { 1047 unsigned int entry = dirty % IPG_RFDLIST_LENGTH; 1048 1049 /* rx_copybreak may poke hole here and there. */ 1050 if (sp->rx_buff[entry]) 1051 continue; 1052 1053 /* Generate a new receive buffer to replace the 1054 * current buffer (which will be released by the 1055 * Linux system). 1056 */ 1057 if (ipg_get_rxbuff(dev, entry) < 0) { 1058 IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n"); 1059 1060 break; 1061 } 1062 1063 /* Reset the RFS field. */ 1064 sp->rxd[entry].rfs = 0x0000000000000000; 1065 } 1066 sp->rx_dirty = dirty; 1067 1068 return 0; 1069} 1070 1071/* use jumboindex and jumbosize to control jumbo frame status 1072 * initial status is jumboindex=-1 and jumbosize=0 1073 * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. 1074 * 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving 1075 * 3. jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump 1076 * previous receiving and need to continue dumping the current one 1077 */ 1078enum { 1079 NORMAL_PACKET, 1080 ERROR_PACKET 1081}; 1082 1083enum { 1084 FRAME_NO_START_NO_END = 0, 1085 FRAME_WITH_START = 1, 1086 FRAME_WITH_END = 10, 1087 FRAME_WITH_START_WITH_END = 11 1088}; 1089 1090static void ipg_nic_rx_free_skb(struct net_device *dev) 1091{ 1092 struct ipg_nic_private *sp = netdev_priv(dev); 1093 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; 1094 1095 if (sp->rx_buff[entry]) { 1096 struct ipg_rx *rxfd = sp->rxd + entry; 1097 1098 pci_unmap_single(sp->pdev, 1099 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1100 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1101 dev_kfree_skb_irq(sp->rx_buff[entry]); 1102 sp->rx_buff[entry] = NULL; 1103 } 1104} 1105 1106static int ipg_nic_rx_check_frame_type(struct net_device *dev) 1107{ 1108 struct ipg_nic_private *sp = netdev_priv(dev); 1109 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); 1110 int type = FRAME_NO_START_NO_END; 1111 1112 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) 1113 type += FRAME_WITH_START; 1114 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) 1115 type += FRAME_WITH_END; 1116 return type; 1117} 1118 1119static int ipg_nic_rx_check_error(struct net_device *dev) 1120{ 1121 struct ipg_nic_private *sp = netdev_priv(dev); 1122 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; 1123 struct ipg_rx *rxfd = sp->rxd + entry; 1124 1125 if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & 1126 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | 1127 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | 1128 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { 1129 IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", 1130 (unsigned long) rxfd->rfs); 1131 1132 /* Increment general receive error statistic. */ 1133 sp->stats.rx_errors++; 1134 1135 /* Increment detailed receive error statistics. */ 1136 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { 1137 IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); 1138 1139 sp->stats.rx_fifo_errors++; 1140 } 1141 1142 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { 1143 IPG_DEBUG_MSG("RX runt occured.\n"); 1144 sp->stats.rx_length_errors++; 1145 } 1146 1147 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, 1148 * error count handled by a IPG statistic register. 1149 */ 1150 1151 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { 1152 IPG_DEBUG_MSG("RX alignment error occured.\n"); 1153 sp->stats.rx_frame_errors++; 1154 } 1155 1156 /* Do nothing for IPG_RFS_RXFCSERROR, error count 1157 * handled by a IPG statistic register. 1158 */ 1159 1160 /* Free the memory associated with the RX 1161 * buffer since it is erroneous and we will 1162 * not pass it to higher layer processes. 1163 */ 1164 if (sp->rx_buff[entry]) { 1165 pci_unmap_single(sp->pdev, 1166 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1167 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1168 1169 dev_kfree_skb_irq(sp->rx_buff[entry]); 1170 sp->rx_buff[entry] = NULL; 1171 } 1172 return ERROR_PACKET; 1173 } 1174 return NORMAL_PACKET; 1175} 1176 1177static void ipg_nic_rx_with_start_and_end(struct net_device *dev, 1178 struct ipg_nic_private *sp, 1179 struct ipg_rx *rxfd, unsigned entry) 1180{ 1181 struct ipg_jumbo *jumbo = &sp->jumbo; 1182 struct sk_buff *skb; 1183 int framelen; 1184 1185 if (jumbo->found_start) { 1186 dev_kfree_skb_irq(jumbo->skb); 1187 jumbo->found_start = 0; 1188 jumbo->current_size = 0; 1189 jumbo->skb = NULL; 1190 } 1191 1192 /* 1: found error, 0 no error */ 1193 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) 1194 return; 1195 1196 skb = sp->rx_buff[entry]; 1197 if (!skb) 1198 return; 1199 1200 /* accept this frame and send to upper layer */ 1201 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; 1202 if (framelen > sp->rxfrag_size) 1203 framelen = sp->rxfrag_size; 1204 1205 skb_put(skb, framelen); 1206 skb->protocol = eth_type_trans(skb, dev); 1207 skb->ip_summed = CHECKSUM_NONE; 1208 netif_rx(skb); 1209 sp->rx_buff[entry] = NULL; 1210} 1211 1212static void ipg_nic_rx_with_start(struct net_device *dev, 1213 struct ipg_nic_private *sp, 1214 struct ipg_rx *rxfd, unsigned entry) 1215{ 1216 struct ipg_jumbo *jumbo = &sp->jumbo; 1217 struct pci_dev *pdev = sp->pdev; 1218 struct sk_buff *skb; 1219 1220 /* 1: found error, 0 no error */ 1221 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) 1222 return; 1223 1224 /* accept this frame and send to upper layer */ 1225 skb = sp->rx_buff[entry]; 1226 if (!skb) 1227 return; 1228 1229 if (jumbo->found_start) 1230 dev_kfree_skb_irq(jumbo->skb); 1231 1232 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1233 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1234 1235 skb_put(skb, sp->rxfrag_size); 1236 1237 jumbo->found_start = 1; 1238 jumbo->current_size = sp->rxfrag_size; 1239 jumbo->skb = skb; 1240 1241 sp->rx_buff[entry] = NULL; 1242} 1243 1244static void ipg_nic_rx_with_end(struct net_device *dev, 1245 struct ipg_nic_private *sp, 1246 struct ipg_rx *rxfd, unsigned entry) 1247{ 1248 struct ipg_jumbo *jumbo = &sp->jumbo; 1249 1250 /* 1: found error, 0 no error */ 1251 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { 1252 struct sk_buff *skb = sp->rx_buff[entry]; 1253 1254 if (!skb) 1255 return; 1256 1257 if (jumbo->found_start) { 1258 int framelen, endframelen; 1259 1260 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; 1261 1262 endframelen = framelen - jumbo->current_size; 1263 if (framelen > sp->rxsupport_size) 1264 dev_kfree_skb_irq(jumbo->skb); 1265 else { 1266 memcpy(skb_put(jumbo->skb, endframelen), 1267 skb->data, endframelen); 1268 1269 jumbo->skb->protocol = 1270 eth_type_trans(jumbo->skb, dev); 1271 1272 jumbo->skb->ip_summed = CHECKSUM_NONE; 1273 netif_rx(jumbo->skb); 1274 } 1275 } 1276 1277 jumbo->found_start = 0; 1278 jumbo->current_size = 0; 1279 jumbo->skb = NULL; 1280 1281 ipg_nic_rx_free_skb(dev); 1282 } else { 1283 dev_kfree_skb_irq(jumbo->skb); 1284 jumbo->found_start = 0; 1285 jumbo->current_size = 0; 1286 jumbo->skb = NULL; 1287 } 1288} 1289 1290static void ipg_nic_rx_no_start_no_end(struct net_device *dev, 1291 struct ipg_nic_private *sp, 1292 struct ipg_rx *rxfd, unsigned entry) 1293{ 1294 struct ipg_jumbo *jumbo = &sp->jumbo; 1295 1296 /* 1: found error, 0 no error */ 1297 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { 1298 struct sk_buff *skb = sp->rx_buff[entry]; 1299 1300 if (skb) { 1301 if (jumbo->found_start) { 1302 jumbo->current_size += sp->rxfrag_size; 1303 if (jumbo->current_size <= sp->rxsupport_size) { 1304 memcpy(skb_put(jumbo->skb, 1305 sp->rxfrag_size), 1306 skb->data, sp->rxfrag_size); 1307 } 1308 } 1309 ipg_nic_rx_free_skb(dev); 1310 } 1311 } else { 1312 dev_kfree_skb_irq(jumbo->skb); 1313 jumbo->found_start = 0; 1314 jumbo->current_size = 0; 1315 jumbo->skb = NULL; 1316 } 1317} 1318 1319static int ipg_nic_rx_jumbo(struct net_device *dev) 1320{ 1321 struct ipg_nic_private *sp = netdev_priv(dev); 1322 unsigned int curr = sp->rx_current; 1323 void __iomem *ioaddr = sp->ioaddr; 1324 unsigned int i; 1325 1326 IPG_DEBUG_MSG("_nic_rx\n"); 1327 1328 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { 1329 unsigned int entry = curr % IPG_RFDLIST_LENGTH; 1330 struct ipg_rx *rxfd = sp->rxd + entry; 1331 1332 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE))) 1333 break; 1334 1335 switch (ipg_nic_rx_check_frame_type(dev)) { 1336 case FRAME_WITH_START_WITH_END: 1337 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); 1338 break; 1339 case FRAME_WITH_START: 1340 ipg_nic_rx_with_start(dev, sp, rxfd, entry); 1341 break; 1342 case FRAME_WITH_END: 1343 ipg_nic_rx_with_end(dev, sp, rxfd, entry); 1344 break; 1345 case FRAME_NO_START_NO_END: 1346 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); 1347 break; 1348 } 1349 } 1350 1351 sp->rx_current = curr; 1352 1353 if (i == IPG_MAXRFDPROCESS_COUNT) { 1354 /* There are more RFDs to process, however the 1355 * allocated amount of RFD processing time has 1356 * expired. Assert Interrupt Requested to make 1357 * sure we come back to process the remaining RFDs. 1358 */ 1359 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); 1360 } 1361 1362 ipg_nic_rxrestore(dev); 1363 1364 return 0; 1365} 1366 1367static int ipg_nic_rx(struct net_device *dev) 1368{ 1369 /* Transfer received Ethernet frames to higher network layers. */ 1370 struct ipg_nic_private *sp = netdev_priv(dev); 1371 unsigned int curr = sp->rx_current; 1372 void __iomem *ioaddr = sp->ioaddr; 1373 struct ipg_rx *rxfd; 1374 unsigned int i; 1375 1376 IPG_DEBUG_MSG("_nic_rx\n"); 1377 1378#define __RFS_MASK \ 1379 cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) 1380 1381 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { 1382 unsigned int entry = curr % IPG_RFDLIST_LENGTH; 1383 struct sk_buff *skb = sp->rx_buff[entry]; 1384 unsigned int framelen; 1385 1386 rxfd = sp->rxd + entry; 1387 1388 if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) 1389 break; 1390 1391 /* Get received frame length. */ 1392 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; 1393 1394 /* Check for jumbo frame arrival with too small 1395 * RXFRAG_SIZE. 1396 */ 1397 if (framelen > sp->rxfrag_size) { 1398 IPG_DEBUG_MSG 1399 ("RFS FrameLen > allocated fragment size.\n"); 1400 1401 framelen = sp->rxfrag_size; 1402 } 1403 1404 if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & 1405 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | 1406 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | 1407 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { 1408 1409 IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", 1410 (unsigned long int) rxfd->rfs); 1411 1412 /* Increment general receive error statistic. */ 1413 sp->stats.rx_errors++; 1414 1415 /* Increment detailed receive error statistics. */ 1416 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { 1417 IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); 1418 sp->stats.rx_fifo_errors++; 1419 } 1420 1421 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { 1422 IPG_DEBUG_MSG("RX runt occured.\n"); 1423 sp->stats.rx_length_errors++; 1424 } 1425 1426 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; 1427 /* Do nothing, error count handled by a IPG 1428 * statistic register. 1429 */ 1430 1431 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { 1432 IPG_DEBUG_MSG("RX alignment error occured.\n"); 1433 sp->stats.rx_frame_errors++; 1434 } 1435 1436 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; 1437 /* Do nothing, error count handled by a IPG 1438 * statistic register. 1439 */ 1440 1441 /* Free the memory associated with the RX 1442 * buffer since it is erroneous and we will 1443 * not pass it to higher layer processes. 1444 */ 1445 if (skb) { 1446 __le64 info = rxfd->frag_info; 1447 1448 pci_unmap_single(sp->pdev, 1449 le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, 1450 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1451 1452 dev_kfree_skb_irq(skb); 1453 } 1454 } else { 1455 1456 /* Adjust the new buffer length to accomodate the size 1457 * of the received frame. 1458 */ 1459 skb_put(skb, framelen); 1460 1461 /* Set the buffer's protocol field to Ethernet. */ 1462 skb->protocol = eth_type_trans(skb, dev); 1463 1464 /* The IPG encountered an error with (or 1465 * there were no) IP/TCP/UDP checksums. 1466 * This may or may not indicate an invalid 1467 * IP/TCP/UDP frame was received. Let the 1468 * upper layer decide. 1469 */ 1470 skb->ip_summed = CHECKSUM_NONE; 1471 1472 /* Hand off frame for higher layer processing. 1473 * The function netif_rx() releases the sk_buff 1474 * when processing completes. 1475 */ 1476 netif_rx(skb); 1477 } 1478 1479 /* Assure RX buffer is not reused by IPG. */ 1480 sp->rx_buff[entry] = NULL; 1481 } 1482 1483 /* 1484 * If there are more RFDs to proces and the allocated amount of RFD 1485 * processing time has expired, assert Interrupt Requested to make 1486 * sure we come back to process the remaining RFDs. 1487 */ 1488 if (i == IPG_MAXRFDPROCESS_COUNT) 1489 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); 1490 1491#ifdef IPG_DEBUG 1492 /* Check if the RFD list contained no receive frame data. */ 1493 if (!i) 1494 sp->EmptyRFDListCount++; 1495#endif 1496 while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && 1497 !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && 1498 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { 1499 unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; 1500 1501 rxfd = sp->rxd + entry; 1502 1503 IPG_DEBUG_MSG("Frame requires multiple RFDs.\n"); 1504 1505 /* An unexpected event, additional code needed to handle 1506 * properly. So for the time being, just disregard the 1507 * frame. 1508 */ 1509 1510 /* Free the memory associated with the RX 1511 * buffer since it is erroneous and we will 1512 * not pass it to higher layer processes. 1513 */ 1514 if (sp->rx_buff[entry]) { 1515 pci_unmap_single(sp->pdev, 1516 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1517 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1518 dev_kfree_skb_irq(sp->rx_buff[entry]); 1519 } 1520 1521 /* Assure RX buffer is not reused by IPG. */ 1522 sp->rx_buff[entry] = NULL; 1523 } 1524 1525 sp->rx_current = curr; 1526 1527 /* Check to see if there are a minimum number of used 1528 * RFDs before restoring any (should improve performance.) 1529 */ 1530 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) 1531 ipg_nic_rxrestore(dev); 1532 1533 return 0; 1534} 1535 1536static void ipg_reset_after_host_error(struct work_struct *work) 1537{ 1538 struct ipg_nic_private *sp = 1539 container_of(work, struct ipg_nic_private, task.work); 1540 struct net_device *dev = sp->dev; 1541 1542 /* 1543 * Acknowledge HostError interrupt by resetting 1544 * IPG DMA and HOST. 1545 */ 1546 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); 1547 1548 init_rfdlist(dev); 1549 init_tfdlist(dev); 1550 1551 if (ipg_io_config(dev) < 0) { 1552 printk(KERN_INFO "%s: Cannot recover from PCI error.\n", 1553 dev->name); 1554 schedule_delayed_work(&sp->task, HZ); 1555 } 1556} 1557 1558static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) 1559{ 1560 struct net_device *dev = dev_inst; 1561 struct ipg_nic_private *sp = netdev_priv(dev); 1562 void __iomem *ioaddr = sp->ioaddr; 1563 unsigned int handled = 0; 1564 u16 status; 1565 1566 IPG_DEBUG_MSG("_interrupt_handler\n"); 1567 1568 if (sp->is_jumbo) 1569 ipg_nic_rxrestore(dev); 1570 1571 spin_lock(&sp->lock); 1572 1573 /* Get interrupt source information, and acknowledge 1574 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, 1575 * IntRequested, MacControlFrame, LinkEvent) interrupts 1576 * if issued. Also, all IPG interrupts are disabled by 1577 * reading IntStatusAck. 1578 */ 1579 status = ipg_r16(INT_STATUS_ACK); 1580 1581 IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status); 1582 1583 /* Shared IRQ of remove event. */ 1584 if (!(status & IPG_IS_RSVD_MASK)) 1585 goto out_enable; 1586 1587 handled = 1; 1588 1589 if (unlikely(!netif_running(dev))) 1590 goto out_unlock; 1591 1592 /* If RFDListEnd interrupt, restore all used RFDs. */ 1593 if (status & IPG_IS_RFD_LIST_END) { 1594 IPG_DEBUG_MSG("RFDListEnd Interrupt.\n"); 1595 1596 /* The RFD list end indicates an RFD was encountered 1597 * with a 0 NextPtr, or with an RFDDone bit set to 1 1598 * (indicating the RFD is not read for use by the 1599 * IPG.) Try to restore all RFDs. 1600 */ 1601 ipg_nic_rxrestore(dev); 1602 1603#ifdef IPG_DEBUG 1604 /* Increment the RFDlistendCount counter. */ 1605 sp->RFDlistendCount++; 1606#endif 1607 } 1608 1609 /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or 1610 * IntRequested interrupt, process received frames. */ 1611 if ((status & IPG_IS_RX_DMA_PRIORITY) || 1612 (status & IPG_IS_RFD_LIST_END) || 1613 (status & IPG_IS_RX_DMA_COMPLETE) || 1614 (status & IPG_IS_INT_REQUESTED)) { 1615#ifdef IPG_DEBUG 1616 /* Increment the RFD list checked counter if interrupted 1617 * only to check the RFD list. */ 1618 if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | 1619 IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & 1620 (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | 1621 IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | 1622 IPG_IS_UPDATE_STATS))) 1623 sp->RFDListCheckedCount++; 1624#endif 1625 1626 if (sp->is_jumbo) 1627 ipg_nic_rx_jumbo(dev); 1628 else 1629 ipg_nic_rx(dev); 1630 } 1631 1632 /* If TxDMAComplete interrupt, free used TFDs. */ 1633 if (status & IPG_IS_TX_DMA_COMPLETE) 1634 ipg_nic_txfree(dev); 1635 1636 /* TxComplete interrupts indicate one of numerous actions. 1637 * Determine what action to take based on TXSTATUS register. 1638 */ 1639 if (status & IPG_IS_TX_COMPLETE) 1640 ipg_nic_txcleanup(dev); 1641 1642 /* If UpdateStats interrupt, update Linux Ethernet statistics */ 1643 if (status & IPG_IS_UPDATE_STATS) 1644 ipg_nic_get_stats(dev); 1645 1646 /* If HostError interrupt, reset IPG. */ 1647 if (status & IPG_IS_HOST_ERROR) { 1648 IPG_DDEBUG_MSG("HostError Interrupt\n"); 1649 1650 schedule_delayed_work(&sp->task, 0); 1651 } 1652 1653 /* If LinkEvent interrupt, resolve autonegotiation. */ 1654 if (status & IPG_IS_LINK_EVENT) { 1655 if (ipg_config_autoneg(dev) < 0) 1656 printk(KERN_INFO "%s: Auto-negotiation error.\n", 1657 dev->name); 1658 } 1659 1660 /* If MACCtrlFrame interrupt, do nothing. */ 1661 if (status & IPG_IS_MAC_CTRL_FRAME) 1662 IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n"); 1663 1664 /* If RxComplete interrupt, do nothing. */ 1665 if (status & IPG_IS_RX_COMPLETE) 1666 IPG_DEBUG_MSG("RxComplete interrupt.\n"); 1667 1668 /* If RxEarly interrupt, do nothing. */ 1669 if (status & IPG_IS_RX_EARLY) 1670 IPG_DEBUG_MSG("RxEarly interrupt.\n"); 1671 1672out_enable: 1673 /* Re-enable IPG interrupts. */ 1674 ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | 1675 IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | 1676 IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); 1677out_unlock: 1678 spin_unlock(&sp->lock); 1679 1680 return IRQ_RETVAL(handled); 1681} 1682 1683static void ipg_rx_clear(struct ipg_nic_private *sp) 1684{ 1685 unsigned int i; 1686 1687 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { 1688 if (sp->rx_buff[i]) { 1689 struct ipg_rx *rxfd = sp->rxd + i; 1690 1691 dev_kfree_skb_irq(sp->rx_buff[i]); 1692 sp->rx_buff[i] = NULL; 1693 pci_unmap_single(sp->pdev, 1694 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1695 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1696 } 1697 } 1698} 1699 1700static void ipg_tx_clear(struct ipg_nic_private *sp) 1701{ 1702 unsigned int i; 1703 1704 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 1705 if (sp->tx_buff[i]) { 1706 struct ipg_tx *txfd = sp->txd + i; 1707 1708 pci_unmap_single(sp->pdev, 1709 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, 1710 sp->tx_buff[i]->len, PCI_DMA_TODEVICE); 1711 1712 dev_kfree_skb_irq(sp->tx_buff[i]); 1713 1714 sp->tx_buff[i] = NULL; 1715 } 1716 } 1717} 1718 1719static int ipg_nic_open(struct net_device *dev) 1720{ 1721 struct ipg_nic_private *sp = netdev_priv(dev); 1722 void __iomem *ioaddr = sp->ioaddr; 1723 struct pci_dev *pdev = sp->pdev; 1724 int rc; 1725 1726 IPG_DEBUG_MSG("_nic_open\n"); 1727 1728 sp->rx_buf_sz = sp->rxsupport_size; 1729 1730 /* Check for interrupt line conflicts, and request interrupt 1731 * line for IPG. 1732 * 1733 * IMPORTANT: Disable IPG interrupts prior to registering 1734 * IRQ. 1735 */ 1736 ipg_w16(0x0000, INT_ENABLE); 1737 1738 /* Register the interrupt line to be used by the IPG within 1739 * the Linux system. 1740 */ 1741 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED, 1742 dev->name, dev); 1743 if (rc < 0) { 1744 printk(KERN_INFO "%s: Error when requesting interrupt.\n", 1745 dev->name); 1746 goto out; 1747 } 1748 1749 dev->irq = pdev->irq; 1750 1751 rc = -ENOMEM; 1752 1753 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, 1754 &sp->rxd_map, GFP_KERNEL); 1755 if (!sp->rxd) 1756 goto err_free_irq_0; 1757 1758 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, 1759 &sp->txd_map, GFP_KERNEL); 1760 if (!sp->txd) 1761 goto err_free_rx_1; 1762 1763 rc = init_rfdlist(dev); 1764 if (rc < 0) { 1765 printk(KERN_INFO "%s: Error during configuration.\n", 1766 dev->name); 1767 goto err_free_tx_2; 1768 } 1769 1770 init_tfdlist(dev); 1771 1772 rc = ipg_io_config(dev); 1773 if (rc < 0) { 1774 printk(KERN_INFO "%s: Error during configuration.\n", 1775 dev->name); 1776 goto err_release_tfdlist_3; 1777 } 1778 1779 /* Resolve autonegotiation. */ 1780 if (ipg_config_autoneg(dev) < 0) 1781 printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name); 1782 1783 /* initialize JUMBO Frame control variable */ 1784 sp->jumbo.found_start = 0; 1785 sp->jumbo.current_size = 0; 1786 sp->jumbo.skb = NULL; 1787 1788 /* Enable transmit and receive operation of the IPG. */ 1789 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & 1790 IPG_MC_RSVD_MASK, MAC_CTRL); 1791 1792 netif_start_queue(dev); 1793out: 1794 return rc; 1795 1796err_release_tfdlist_3: 1797 ipg_tx_clear(sp); 1798 ipg_rx_clear(sp); 1799err_free_tx_2: 1800 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); 1801err_free_rx_1: 1802 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); 1803err_free_irq_0: 1804 free_irq(pdev->irq, dev); 1805 goto out; 1806} 1807 1808static int ipg_nic_stop(struct net_device *dev) 1809{ 1810 struct ipg_nic_private *sp = netdev_priv(dev); 1811 void __iomem *ioaddr = sp->ioaddr; 1812 struct pci_dev *pdev = sp->pdev; 1813 1814 IPG_DEBUG_MSG("_nic_stop\n"); 1815 1816 netif_stop_queue(dev); 1817 1818 IPG_DUMPTFDLIST(dev); 1819 1820 do { 1821 (void) ipg_r16(INT_STATUS_ACK); 1822 1823 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); 1824 1825 synchronize_irq(pdev->irq); 1826 } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); 1827 1828 ipg_rx_clear(sp); 1829 1830 ipg_tx_clear(sp); 1831 1832 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); 1833 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); 1834 1835 free_irq(pdev->irq, dev); 1836 1837 return 0; 1838} 1839 1840static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb, 1841 struct net_device *dev) 1842{ 1843 struct ipg_nic_private *sp = netdev_priv(dev); 1844 void __iomem *ioaddr = sp->ioaddr; 1845 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; 1846 unsigned long flags; 1847 struct ipg_tx *txfd; 1848 1849 IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); 1850 1851 /* If in 10Mbps mode, stop the transmit queue so 1852 * no more transmit frames are accepted. 1853 */ 1854 if (sp->tenmbpsmode) 1855 netif_stop_queue(dev); 1856 1857 if (sp->reset_current_tfd) { 1858 sp->reset_current_tfd = 0; 1859 entry = 0; 1860 } 1861 1862 txfd = sp->txd + entry; 1863 1864 sp->tx_buff[entry] = skb; 1865 1866 /* Clear all TFC fields, except TFDDONE. */ 1867 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); 1868 1869 /* Specify the TFC field within the TFD. */ 1870 txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | 1871 (IPG_TFC_FRAMEID & sp->tx_current) | 1872 (IPG_TFC_FRAGCOUNT & (1 << 24))); 1873 /* 1874 * 16--17 (WordAlign) <- 3 (disable), 1875 * 0--15 (FrameId) <- sp->tx_current, 1876 * 24--27 (FragCount) <- 1 1877 */ 1878 1879 /* Request TxComplete interrupts at an interval defined 1880 * by the constant IPG_FRAMESBETWEENTXCOMPLETES. 1881 * Request TxComplete interrupt for every frame 1882 * if in 10Mbps mode to accomodate problem with 10Mbps 1883 * processing. 1884 */ 1885 if (sp->tenmbpsmode) 1886 txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); 1887 txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); 1888 /* Based on compilation option, determine if FCS is to be 1889 * appended to transmit frame by IPG. 1890 */ 1891 if (!(IPG_APPEND_FCS_ON_TX)) 1892 txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); 1893 1894 /* Based on compilation option, determine if IP, TCP and/or 1895 * UDP checksums are to be added to transmit frame by IPG. 1896 */ 1897 if (IPG_ADD_IPCHECKSUM_ON_TX) 1898 txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); 1899 1900 if (IPG_ADD_TCPCHECKSUM_ON_TX) 1901 txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); 1902 1903 if (IPG_ADD_UDPCHECKSUM_ON_TX) 1904 txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); 1905 1906 /* Based on compilation option, determine if VLAN tag info is to be 1907 * inserted into transmit frame by IPG. 1908 */ 1909 if (IPG_INSERT_MANUAL_VLAN_TAG) { 1910 txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | 1911 ((u64) IPG_MANUAL_VLAN_VID << 32) | 1912 ((u64) IPG_MANUAL_VLAN_CFI << 44) | 1913 ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); 1914 } 1915 1916 /* The fragment start location within system memory is defined 1917 * by the sk_buff structure's data field. The physical address 1918 * of this location within the system's virtual memory space 1919 * is determined using the IPG_HOST2BUS_MAP function. 1920 */ 1921 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, 1922 skb->len, PCI_DMA_TODEVICE)); 1923 1924 /* The length of the fragment within system memory is defined by 1925 * the sk_buff structure's len field. 1926 */ 1927 txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & 1928 ((u64) (skb->len & 0xffff) << 48)); 1929 1930 /* Clear the TFDDone bit last to indicate the TFD is ready 1931 * for transfer to the IPG. 1932 */ 1933 txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); 1934 1935 spin_lock_irqsave(&sp->lock, flags); 1936 1937 sp->tx_current++; 1938 1939 mmiowb(); 1940 1941 ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); 1942 1943 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) 1944 netif_stop_queue(dev); 1945 1946 spin_unlock_irqrestore(&sp->lock, flags); 1947 1948 return NETDEV_TX_OK; 1949} 1950 1951static void ipg_set_phy_default_param(unsigned char rev, 1952 struct net_device *dev, int phy_address) 1953{ 1954 unsigned short length; 1955 unsigned char revision; 1956 unsigned short *phy_param; 1957 unsigned short address, value; 1958 1959 phy_param = &DefaultPhyParam[0]; 1960 length = *phy_param & 0x00FF; 1961 revision = (unsigned char)((*phy_param) >> 8); 1962 phy_param++; 1963 while (length != 0) { 1964 if (rev == revision) { 1965 while (length > 1) { 1966 address = *phy_param; 1967 value = *(phy_param + 1); 1968 phy_param += 2; 1969 mdio_write(dev, phy_address, address, value); 1970 length -= 4; 1971 } 1972 break; 1973 } else { 1974 phy_param += length / 2; 1975 length = *phy_param & 0x00FF; 1976 revision = (unsigned char)((*phy_param) >> 8); 1977 phy_param++; 1978 } 1979 } 1980} 1981 1982static int read_eeprom(struct net_device *dev, int eep_addr) 1983{ 1984 void __iomem *ioaddr = ipg_ioaddr(dev); 1985 unsigned int i; 1986 int ret = 0; 1987 u16 value; 1988 1989 value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); 1990 ipg_w16(value, EEPROM_CTRL); 1991 1992 for (i = 0; i < 1000; i++) { 1993 u16 data; 1994 1995 mdelay(10); 1996 data = ipg_r16(EEPROM_CTRL); 1997 if (!(data & IPG_EC_EEPROM_BUSY)) { 1998 ret = ipg_r16(EEPROM_DATA); 1999 break; 2000 } 2001 } 2002 return ret; 2003} 2004 2005static void ipg_init_mii(struct net_device *dev) 2006{ 2007 struct ipg_nic_private *sp = netdev_priv(dev); 2008 struct mii_if_info *mii_if = &sp->mii_if; 2009 int phyaddr; 2010 2011 mii_if->dev = dev; 2012 mii_if->mdio_read = mdio_read; 2013 mii_if->mdio_write = mdio_write; 2014 mii_if->phy_id_mask = 0x1f; 2015 mii_if->reg_num_mask = 0x1f; 2016 2017 mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); 2018 2019 if (phyaddr != 0x1f) { 2020 u16 mii_phyctrl, mii_1000cr; 2021 u8 revisionid = 0; 2022 2023 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); 2024 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | 2025 GMII_PHY_1000BASETCONTROL_PreferMaster; 2026 mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); 2027 2028 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); 2029 2030 /* Set default phyparam */ 2031 pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); 2032 ipg_set_phy_default_param(revisionid, dev, phyaddr); 2033 2034 /* Reset PHY */ 2035 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; 2036 mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); 2037 2038 } 2039} 2040 2041static int ipg_hw_init(struct net_device *dev) 2042{ 2043 struct ipg_nic_private *sp = netdev_priv(dev); 2044 void __iomem *ioaddr = sp->ioaddr; 2045 unsigned int i; 2046 int rc; 2047 2048 /* Read/Write and Reset EEPROM Value */ 2049 /* Read LED Mode Configuration from EEPROM */ 2050 sp->led_mode = read_eeprom(dev, 6); 2051 2052 /* Reset all functions within the IPG. Do not assert 2053 * RST_OUT as not compatible with some PHYs. 2054 */ 2055 rc = ipg_reset(dev, IPG_RESET_MASK); 2056 if (rc < 0) 2057 goto out; 2058 2059 ipg_init_mii(dev); 2060 2061 /* Read MAC Address from EEPROM */ 2062 for (i = 0; i < 3; i++) 2063 sp->station_addr[i] = read_eeprom(dev, 16 + i); 2064 2065 for (i = 0; i < 3; i++) 2066 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); 2067 2068 /* Set station address in ethernet_device structure. */ 2069 dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; 2070 dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; 2071 dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; 2072 dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; 2073 dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; 2074 dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; 2075out: 2076 return rc; 2077} 2078 2079static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2080{ 2081 struct ipg_nic_private *sp = netdev_priv(dev); 2082 int rc; 2083 2084 mutex_lock(&sp->mii_mutex); 2085 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); 2086 mutex_unlock(&sp->mii_mutex); 2087 2088 return rc; 2089} 2090 2091static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) 2092{ 2093 struct ipg_nic_private *sp = netdev_priv(dev); 2094 int err; 2095 2096 /* Function to accomodate changes to Maximum Transfer Unit 2097 * (or MTU) of IPG NIC. Cannot use default function since 2098 * the default will not allow for MTU > 1500 bytes. 2099 */ 2100 2101 IPG_DEBUG_MSG("_nic_change_mtu\n"); 2102 2103 /* 2104 * Check that the new MTU value is between 68 (14 byte header, 46 byte 2105 * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU. 2106 */ 2107 if (new_mtu < 68 || new_mtu > 10240) 2108 return -EINVAL; 2109 2110 err = ipg_nic_stop(dev); 2111 if (err) 2112 return err; 2113 2114 dev->mtu = new_mtu; 2115 2116 sp->max_rxframe_size = new_mtu; 2117 2118 sp->rxfrag_size = new_mtu; 2119 if (sp->rxfrag_size > 4088) 2120 sp->rxfrag_size = 4088; 2121 2122 sp->rxsupport_size = sp->max_rxframe_size; 2123 2124 if (new_mtu > 0x0600) 2125 sp->is_jumbo = true; 2126 else 2127 sp->is_jumbo = false; 2128 2129 return ipg_nic_open(dev); 2130} 2131 2132static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2133{ 2134 struct ipg_nic_private *sp = netdev_priv(dev); 2135 int rc; 2136 2137 mutex_lock(&sp->mii_mutex); 2138 rc = mii_ethtool_gset(&sp->mii_if, cmd); 2139 mutex_unlock(&sp->mii_mutex); 2140 2141 return rc; 2142} 2143 2144static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2145{ 2146 struct ipg_nic_private *sp = netdev_priv(dev); 2147 int rc; 2148 2149 mutex_lock(&sp->mii_mutex); 2150 rc = mii_ethtool_sset(&sp->mii_if, cmd); 2151 mutex_unlock(&sp->mii_mutex); 2152 2153 return rc; 2154} 2155 2156static int ipg_nway_reset(struct net_device *dev) 2157{ 2158 struct ipg_nic_private *sp = netdev_priv(dev); 2159 int rc; 2160 2161 mutex_lock(&sp->mii_mutex); 2162 rc = mii_nway_restart(&sp->mii_if); 2163 mutex_unlock(&sp->mii_mutex); 2164 2165 return rc; 2166} 2167 2168static const struct ethtool_ops ipg_ethtool_ops = { 2169 .get_settings = ipg_get_settings, 2170 .set_settings = ipg_set_settings, 2171 .nway_reset = ipg_nway_reset, 2172}; 2173 2174static void __devexit ipg_remove(struct pci_dev *pdev) 2175{ 2176 struct net_device *dev = pci_get_drvdata(pdev); 2177 struct ipg_nic_private *sp = netdev_priv(dev); 2178 2179 IPG_DEBUG_MSG("_remove\n"); 2180 2181 /* Un-register Ethernet device. */ 2182 unregister_netdev(dev); 2183 2184 pci_iounmap(pdev, sp->ioaddr); 2185 2186 pci_release_regions(pdev); 2187 2188 free_netdev(dev); 2189 pci_disable_device(pdev); 2190 pci_set_drvdata(pdev, NULL); 2191} 2192 2193static const struct net_device_ops ipg_netdev_ops = { 2194 .ndo_open = ipg_nic_open, 2195 .ndo_stop = ipg_nic_stop, 2196 .ndo_start_xmit = ipg_nic_hard_start_xmit, 2197 .ndo_get_stats = ipg_nic_get_stats, 2198 .ndo_set_multicast_list = ipg_nic_set_multicast_list, 2199 .ndo_do_ioctl = ipg_ioctl, 2200 .ndo_tx_timeout = ipg_tx_timeout, 2201 .ndo_change_mtu = ipg_nic_change_mtu, 2202 .ndo_set_mac_address = eth_mac_addr, 2203 .ndo_validate_addr = eth_validate_addr, 2204}; 2205 2206static int __devinit ipg_probe(struct pci_dev *pdev, 2207 const struct pci_device_id *id) 2208{ 2209 unsigned int i = id->driver_data; 2210 struct ipg_nic_private *sp; 2211 struct net_device *dev; 2212 void __iomem *ioaddr; 2213 int rc; 2214 2215 rc = pci_enable_device(pdev); 2216 if (rc < 0) 2217 goto out; 2218 2219 printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]); 2220 2221 pci_set_master(pdev); 2222 2223 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); 2224 if (rc < 0) { 2225 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2226 if (rc < 0) { 2227 printk(KERN_ERR "%s: DMA config failed.\n", 2228 pci_name(pdev)); 2229 goto err_disable_0; 2230 } 2231 } 2232 2233 /* 2234 * Initialize net device. 2235 */ 2236 dev = alloc_etherdev(sizeof(struct ipg_nic_private)); 2237 if (!dev) { 2238 printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev)); 2239 rc = -ENOMEM; 2240 goto err_disable_0; 2241 } 2242 2243 sp = netdev_priv(dev); 2244 spin_lock_init(&sp->lock); 2245 mutex_init(&sp->mii_mutex); 2246 2247 sp->is_jumbo = IPG_IS_JUMBO; 2248 sp->rxfrag_size = IPG_RXFRAG_SIZE; 2249 sp->rxsupport_size = IPG_RXSUPPORT_SIZE; 2250 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE; 2251 2252 /* Declare IPG NIC functions for Ethernet device methods. 2253 */ 2254 dev->netdev_ops = &ipg_netdev_ops; 2255 SET_NETDEV_DEV(dev, &pdev->dev); 2256 SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops); 2257 2258 rc = pci_request_regions(pdev, DRV_NAME); 2259 if (rc) 2260 goto err_free_dev_1; 2261 2262 ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); 2263 if (!ioaddr) { 2264 printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev)); 2265 rc = -EIO; 2266 goto err_release_regions_2; 2267 } 2268 2269 /* Save the pointer to the PCI device information. */ 2270 sp->ioaddr = ioaddr; 2271 sp->pdev = pdev; 2272 sp->dev = dev; 2273 2274 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); 2275 2276 pci_set_drvdata(pdev, dev); 2277 2278 rc = ipg_hw_init(dev); 2279 if (rc < 0) 2280 goto err_unmap_3; 2281 2282 rc = register_netdev(dev); 2283 if (rc < 0) 2284 goto err_unmap_3; 2285 2286 printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name); 2287out: 2288 return rc; 2289 2290err_unmap_3: 2291 pci_iounmap(pdev, ioaddr); 2292err_release_regions_2: 2293 pci_release_regions(pdev); 2294err_free_dev_1: 2295 free_netdev(dev); 2296err_disable_0: 2297 pci_disable_device(pdev); 2298 goto out; 2299} 2300 2301static struct pci_driver ipg_pci_driver = { 2302 .name = IPG_DRIVER_NAME, 2303 .id_table = ipg_pci_tbl, 2304 .probe = ipg_probe, 2305 .remove = __devexit_p(ipg_remove), 2306}; 2307 2308static int __init ipg_init_module(void) 2309{ 2310 return pci_register_driver(&ipg_pci_driver); 2311} 2312 2313static void __exit ipg_exit_module(void) 2314{ 2315 pci_unregister_driver(&ipg_pci_driver); 2316} 2317 2318module_init(ipg_init_module); 2319module_exit(ipg_exit_module); 2320