1/* winbond-840.c: A Linux PCI network adapter device driver. */ 2/* 3 Written 1998-2001 by Donald Becker. 4 5 This software may be used and distributed according to the terms of 6 the GNU General Public License (GPL), incorporated herein by reference. 7 Drivers based on or derived from this code fall under the GPL and must 8 retain the authorship, copyright and license notice. This file is not 9 a complete program and may only be used when the entire operating 10 system is licensed under the GPL. 11 12 The author may be reached as becker@scyld.com, or C/O 13 Scyld Computing Corporation 14 410 Severn Ave., Suite 210 15 Annapolis MD 21403 16 17 Support and updates available at 18 http://www.scyld.com/network/drivers.html 19 20 Do not remove the copyright information. 21 Do not change the version information unless an improvement has been made. 22 Merely removing my name, as Compex has done in the past, does not count 23 as an improvement. 24 25 Changelog: 26 * ported to 2.4 27 ??? 28 * spin lock update, memory barriers, new style dma mappings 29 limit each tx buffer to < 1024 bytes 30 remove DescIntr from Rx descriptors (that's an Tx flag) 31 remove next pointer from Tx descriptors 32 synchronize tx_q_bytes 33 software reset in tx_timeout 34 Copyright (C) 2000 Manfred Spraul 35 * further cleanups 36 power management. 37 support for big endian descriptors 38 Copyright (C) 2001 Manfred Spraul 39 * ethtool support (jgarzik) 40 * Replace some MII-related magic numbers with constants (jgarzik) 41 42 TODO: 43 * enable pci_power_off 44 * Wake-On-LAN 45*/ 46 47#define DRV_NAME "winbond-840" 48#define DRV_VERSION "1.01-e" 49#define DRV_RELDATE "Sep-11-2006" 50 51 52/* Automatically extracted configuration info: 53probe-func: winbond840_probe 54config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840 55 56c-help-name: Winbond W89c840 PCI Ethernet support 57c-help-symbol: CONFIG_WINBOND_840 58c-help: This driver is for the Winbond W89c840 chip. 
It also works with
c-help:	the TX9882 chip on the Compex RL100-ATX board.
c-help:	More specific information and updates are available from
c-help:	http://www.scyld.com/network/drivers.html
*/

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used. */
/* Tx queue is woken again once fewer than this many descriptors are in flight
   (see netdev_tx_done). */
#define TX_QUEUE_LEN_RESTART	5

/* A single descriptor buffer may not exceed 1KB on this chip; keep some
   headroom below that limit (see start_tx, which splits larger packets
   across both buffers of one descriptor). */
#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)


/* Operational parameters that usually are not changed.
 */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ			/* tulip.h also defines this */
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

/* These identify the driver base version and may not be removed.
 */
static const char version[] __initconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) "
	DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
	"  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
	Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

IVc. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/



/*
  PCI probe table.
*/
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

/* Last column (driver_data) is the chip index into pci_id_tbl[] below. */
static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size		= 128,	/* size of PCI BAR resource */
};

struct pci_id_info {
	const char *name;
	int drv_flags;		/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{ 				/* Sometime a Level-One switch card. */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
*/

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
*/
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

/* Bits in the MIICtrl register used for bit-banged MDIO (see mdio_read). */
enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT		1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;	/* DMA address of the shared rx+tx ring block */
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect.
 */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;			/* Shadow of the NetworkConfig register */
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;		/* Bytes queued, for the Tx-FIFO bug workaround */
	unsigned int tx_full;			/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;				/* MII device addresses. */
	unsigned char phys[MII_CNT];		/* MII device addresses, but only the first is used */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};

static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int  alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);

static const struct
net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/* PCI probe: enable the device, map BAR space, read the station address
   from the EEPROM, reset the chip, register the net_device and scan for
   an attached MII PHY.  'ent->driver_data' indexes pci_id_tbl[]. */
static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;		/* counts probed boards across calls */
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
			   pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

	/* The station address lives in the first three EEPROM words. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required!
 */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		/* Scan PHY addresses 1..31; keep at most MII_CNT responders. */
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the alway-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

/* Bit-bang one 16-bit word out of the serial EEPROM at 'location'. */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock the 16 data bits in, MSB first. */
	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ?
1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
	met by back-to-back 33Mhz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

/* Bit-bang a read of MII register 'location' on PHY 'phy_id'.
   Returns the 16-bit register value. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits.
 */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Drop the trailing idle bit, keep the 16 data bits. */
	return (retval>>1) & 0xffff;
}

/* Bit-bang a write of 'value' to MII register 'location' on PHY 'phy_id'.
   Also shadows writes to the advertising register (reg 4) of the first PHY. */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4  &&  phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits.
*/ 623 for (i = 2; i > 0; i--) { 624 iowrite32(MDIO_EnbIn, mdio_addr); 625 mdio_delay(mdio_addr); 626 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); 627 mdio_delay(mdio_addr); 628 } 629} 630 631 632static int netdev_open(struct net_device *dev) 633{ 634 struct netdev_private *np = netdev_priv(dev); 635 void __iomem *ioaddr = np->base_addr; 636 int i; 637 638 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ 639 640 netif_device_detach(dev); 641 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 642 if (i) 643 goto out_err; 644 645 if (debug > 1) 646 printk(KERN_DEBUG "%s: w89c840_open() irq %d\n", 647 dev->name, dev->irq); 648 649 if((i=alloc_ringdesc(dev))) 650 goto out_err; 651 652 spin_lock_irq(&np->lock); 653 netif_device_attach(dev); 654 init_registers(dev); 655 spin_unlock_irq(&np->lock); 656 657 netif_start_queue(dev); 658 if (debug > 2) 659 printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name); 660 661 /* Set the timer to check for link beat. */ 662 init_timer(&np->timer); 663 np->timer.expires = jiffies + 1*HZ; 664 np->timer.data = (unsigned long)dev; 665 np->timer.function = &netdev_timer; /* timer handler */ 666 add_timer(&np->timer); 667 return 0; 668out_err: 669 netif_device_attach(dev); 670 return i; 671} 672 673#define MII_DAVICOM_DM9101 0x0181b800 674 675static int update_link(struct net_device *dev) 676{ 677 struct netdev_private *np = netdev_priv(dev); 678 int duplex, fasteth, result, mii_reg; 679 680 /* BSMR */ 681 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); 682 683 if (mii_reg == 0xffff) 684 return np->csr6; 685 /* reread: the link status bit is sticky */ 686 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); 687 if (!(mii_reg & 0x4)) { 688 if (netif_carrier_ok(dev)) { 689 if (debug) 690 dev_info(&dev->dev, 691 "MII #%d reports no link. 
Disabling watchdog\n", 692 np->phys[0]); 693 netif_carrier_off(dev); 694 } 695 return np->csr6; 696 } 697 if (!netif_carrier_ok(dev)) { 698 if (debug) 699 dev_info(&dev->dev, 700 "MII #%d link is back. Enabling watchdog\n", 701 np->phys[0]); 702 netif_carrier_on(dev); 703 } 704 705 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) { 706 /* If the link partner doesn't support autonegotiation 707 * the MII detects it's abilities with the "parallel detection". 708 * Some MIIs update the LPA register to the result of the parallel 709 * detection, some don't. 710 * The Davicom PHY [at least 0181b800] doesn't. 711 * Instead bit 9 and 13 of the BMCR are updated to the result 712 * of the negotiation.. 713 */ 714 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR); 715 duplex = mii_reg & BMCR_FULLDPLX; 716 fasteth = mii_reg & BMCR_SPEED100; 717 } else { 718 int negotiated; 719 mii_reg = mdio_read(dev, np->phys[0], MII_LPA); 720 negotiated = mii_reg & np->mii_if.advertising; 721 722 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL); 723 fasteth = negotiated & 0x380; 724 } 725 duplex |= np->mii_if.force_media; 726 /* remove fastether and fullduplex */ 727 result = np->csr6 & ~0x20000200; 728 if (duplex) 729 result |= 0x200; 730 if (fasteth) 731 result |= 0x20000000; 732 if (result != np->csr6 && debug) 733 dev_info(&dev->dev, 734 "Setting %dMBit-%s-duplex based on MII#%d\n", 735 fasteth ? 100 : 10, duplex ? 
"full" : "half", 736 np->phys[0]); 737 return result; 738} 739 740#define RXTX_TIMEOUT 2000 741static inline void update_csr6(struct net_device *dev, int new) 742{ 743 struct netdev_private *np = netdev_priv(dev); 744 void __iomem *ioaddr = np->base_addr; 745 int limit = RXTX_TIMEOUT; 746 747 if (!netif_device_present(dev)) 748 new = 0; 749 if (new==np->csr6) 750 return; 751 /* stop both Tx and Rx processes */ 752 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig); 753 /* wait until they have really stopped */ 754 for (;;) { 755 int csr5 = ioread32(ioaddr + IntrStatus); 756 int t; 757 758 t = (csr5 >> 17) & 0x07; 759 if (t==0||t==1) { 760 /* rx stopped */ 761 t = (csr5 >> 20) & 0x07; 762 if (t==0||t==1) 763 break; 764 } 765 766 limit--; 767 if(!limit) { 768 dev_info(&dev->dev, 769 "couldn't stop rxtx, IntrStatus %xh\n", csr5); 770 break; 771 } 772 udelay(1); 773 } 774 np->csr6 = new; 775 /* and restart them with the new configuration */ 776 iowrite32(np->csr6, ioaddr + NetworkConfig); 777 if (new & 0x200) 778 np->mii_if.full_duplex = 1; 779} 780 781static void netdev_timer(unsigned long data) 782{ 783 struct net_device *dev = (struct net_device *)data; 784 struct netdev_private *np = netdev_priv(dev); 785 void __iomem *ioaddr = np->base_addr; 786 787 if (debug > 2) 788 printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n", 789 dev->name, ioread32(ioaddr + IntrStatus), 790 ioread32(ioaddr + NetworkConfig)); 791 spin_lock_irq(&np->lock); 792 update_csr6(dev, update_link(dev)); 793 spin_unlock_irq(&np->lock); 794 np->timer.expires = jiffies + 10*HZ; 795 add_timer(&np->timer); 796} 797 798static void init_rxtx_rings(struct net_device *dev) 799{ 800 struct netdev_private *np = netdev_priv(dev); 801 int i; 802 803 np->rx_head_desc = &np->rx_ring[0]; 804 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE]; 805 806 /* Initial all Rx descriptors. 
 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		/* Hand the descriptor to the hardware. */
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	/* If some allocations failed, dirty_rx goes negative (as unsigned)
	   so the refill logic knows how many buffers are missing. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	/* Tell the chip where both rings start. */
	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		np->base_addr + TxRingPtr);

}

/* Unmap and free every skbuff still attached to either ring. */
static void free_rxtx_rings(struct netdev_private* np)
{
	int i;
	/* Free all the skbuffs in the Rx queue.
 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->rx_addr[i],
						np->rx_skbuff[i]->len,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->tx_addr[i],
						np->tx_skbuff[i]->len,
						PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

/* Program the station address, bus configuration, csr6 and the interrupt
   mask.  Called with np->lock held (from netdev_open / tx_timeout). */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed> 		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords */

#if defined(__i386__) && !defined(MODULE)
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || \
	defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined(CONFIG_PARISC)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
		Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask.
*/ 924 iowrite32(0x1A0F5, ioaddr + IntrStatus); 925 iowrite32(0x1A0F5, ioaddr + IntrEnable); 926 927 iowrite32(0, ioaddr + RxStartDemand); 928} 929 930static void tx_timeout(struct net_device *dev) 931{ 932 struct netdev_private *np = netdev_priv(dev); 933 void __iomem *ioaddr = np->base_addr; 934 935 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n", 936 ioread32(ioaddr + IntrStatus)); 937 938 { 939 int i; 940 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); 941 for (i = 0; i < RX_RING_SIZE; i++) 942 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status); 943 printk(KERN_CONT "\n"); 944 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring); 945 for (i = 0; i < TX_RING_SIZE; i++) 946 printk(KERN_CONT " %08x", np->tx_ring[i].status); 947 printk(KERN_CONT "\n"); 948 } 949 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n", 950 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); 951 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C)); 952 953 disable_irq(dev->irq); 954 spin_lock_irq(&np->lock); 955 /* 956 * Under high load dirty_tx and the internal tx descriptor pointer 957 * come out of sync, thus perform a software reset and reinitialize 958 * everything. 959 */ 960 961 iowrite32(1, np->base_addr+PCIBusCfg); 962 udelay(1); 963 964 free_rxtx_rings(np); 965 init_rxtx_rings(dev); 966 init_registers(dev); 967 spin_unlock_irq(&np->lock); 968 enable_irq(dev->irq); 969 970 netif_wake_queue(dev); 971 dev->trans_start = jiffies; /* prevent tx timeout */ 972 np->stats.tx_errors++; 973} 974 975/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 976static int alloc_ringdesc(struct net_device *dev) 977{ 978 struct netdev_private *np = netdev_priv(dev); 979 980 np->rx_buf_sz = (dev->mtu <= 1500 ? 
PKT_BUF_SZ : dev->mtu + 32); 981 982 np->rx_ring = pci_alloc_consistent(np->pci_dev, 983 sizeof(struct w840_rx_desc)*RX_RING_SIZE + 984 sizeof(struct w840_tx_desc)*TX_RING_SIZE, 985 &np->ring_dma_addr); 986 if(!np->rx_ring) 987 return -ENOMEM; 988 init_rxtx_rings(dev); 989 return 0; 990} 991 992static void free_ringdesc(struct netdev_private *np) 993{ 994 pci_free_consistent(np->pci_dev, 995 sizeof(struct w840_rx_desc)*RX_RING_SIZE + 996 sizeof(struct w840_tx_desc)*TX_RING_SIZE, 997 np->rx_ring, np->ring_dma_addr); 998 999} 1000 1001static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) 1002{ 1003 struct netdev_private *np = netdev_priv(dev); 1004 unsigned entry; 1005 1006 /* Caution: the write order is important here, set the field 1007 with the "ownership" bits last. */ 1008 1009 /* Calculate the next Tx descriptor entry. */ 1010 entry = np->cur_tx % TX_RING_SIZE; 1011 1012 np->tx_addr[entry] = pci_map_single(np->pci_dev, 1013 skb->data,skb->len, PCI_DMA_TODEVICE); 1014 np->tx_skbuff[entry] = skb; 1015 1016 np->tx_ring[entry].buffer1 = np->tx_addr[entry]; 1017 if (skb->len < TX_BUFLIMIT) { 1018 np->tx_ring[entry].length = DescWholePkt | skb->len; 1019 } else { 1020 int len = skb->len - TX_BUFLIMIT; 1021 1022 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT; 1023 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT; 1024 } 1025 if(entry == TX_RING_SIZE-1) 1026 np->tx_ring[entry].length |= DescEndRing; 1027 1028 /* Now acquire the irq spinlock. 1029 * The difficult race is the ordering between 1030 * increasing np->cur_tx and setting DescOwned: 1031 * - if np->cur_tx is increased first the interrupt 1032 * handler could consider the packet as transmitted 1033 * since DescOwned is cleared. 1034 * - If DescOwned is set first the NIC could report the 1035 * packet as sent, but the interrupt handler would ignore it 1036 * since the np->cur_tx was not yet increased. 
 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Stop the queue when either the descriptor ring is exhausted or,
	 * on chips with the Tx FIFO bug, the in-flight byte budget is.
	 */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
		       dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

/* Reclaim completed Tx descriptors: update the error/byte counters and
 * free the transmitted skbs.  Called from the interrupt handler with
 * np->lock held.
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)	/* sign bit set: chip still owns it */
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
				       dev->name, tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
				       dev->name, entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	/* Device detached (e.g. during suspend): not our interrupt. */
	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
			       dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers.
 */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
		       dev->name, ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
		       entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			printk(KERN_DEBUG " netdev_rx() status was %08x\n",
			       status);
		if (status < 0)	/* sign bit set: chip still owns the entry */
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
					       dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
				       pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring buffer itself up the stack;
				 * the refill loop below replaces it.
				 */
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version	/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
				       &skb->data[0], &skb->data[6],
				       skb->data[12], skb->data[13],
				       &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							skb->data,
							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();	/* buffer1 must be visible before ownership flips */
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}

/* Handle the "abnormal" interrupt events: Tx FIFO underflow (raise the
 * Tx start threshold), missed Rx frames, and the deferred-interrupt
 * timer armed by the work-limit path in intr_handler.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
		       dev->name, intr_status);
	if (intr_status == 0xffffffff)	/* all-ones: chip is gone, bail out */
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
		printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
		       dev->name, new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts.
 */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	/* Fold the chip's missed-frame counter into the stats and restart
	 * Rx descriptor polling.
	 */
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

/* Return accumulated statistics, folding in the chip's missed-frame
 * counter only while the hardware is running and present.
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}


/* Program the 64-bit multicast hash filter registers and return the Rx
 * mode bits for csr6 (the caller applies them via update_csr6()).
 */
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int filbit;

			/* Hash index: top 6 bits of the Ethernet CRC. */
			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			filbit &= 0x3f;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}

/* ndo_set_rx_mode hook: recompute the filter and merge the Rx mode bits
 * into csr6 under the device lock.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

/* ethtool: report driver name, version and PCI bus location. */
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(np->pci_dev));
}

/* ethtool: read the current link settings from the MII layer. */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

/* ethtool: apply new link settings through the MII layer. */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

/* ethtool: restart autonegotiation. */
static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

/* ethtool: report link-up state from the MII layer. */
static u32
netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

/* ethtool: get the driver debug level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

/* ethtool: set the debug level (module-wide, shared by all devices). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

/* MII ioctl handler: SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
		data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:	/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:	/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_stop: quiesce the chip, free the IRQ, then release the ring
 * buffers and the descriptor block.
 */
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
		       dev->name, ioread32(ioaddr + IntrStatus),
		       ioread32(ioaddr + NetworkConfig));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
		       dev->name,
		       np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	/* All-ones read means the chip is gone (e.g. hot unplug). */
	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

/* PCI remove: unregister the netdev and release the PCI resources. */
static void __devexit w840_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM

/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 *	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 *	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - start_xmit:
 *	synchronize_irq + netif_tx_disable;
 * - tx_timeout:
 *	netif_device_detach + netif_tx_disable;
 * - set_multicast_list
 *	netif_device_detach + netif_tx_disable;
 * - interrupt handler
 *	doesn't touch hw if not present, synchronize_irq waits for
 *	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all function that write IntrEnable check netif_device_present
 * before settings any bits.
 *
 * Detach must occur under spin_unlock_irq(), interrupts from a detached
 * device would cause an irq storm.
 */
/* PM suspend: stop the chip, mask interrupts, and free the Rx/Tx ring
 * buffers (the descriptor block itself stays allocated for resume).
 */
static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

/* PM resume: re-enable the PCI device, soft-reset the chip, rebuild the
 * rings/registers and restart the interface.
 */
static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		if ((retval = pci_enable_device(pdev))) {
			dev_err(&dev->dev,
				"pci_enable_device failed in resume\n");
			goto out;
		}
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr+PCIBusCfg);	/* software reset */
		ioread32(np->base_addr+PCIBusCfg);	/* read back to flush the write */
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}
#endif

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= __devexit_p(w840_remove1),
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};

/* Module init: print the version banner and register the PCI driver. */
static int __init w840_init(void)
{
	printk(version);
	return pci_register_driver(&w840_driver);
}

/* Module exit: unregister the PCI driver. */
static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);