/*
        Written 1998-2000 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support information and updates available at
        http://www.scyld.com/network/pci-skeleton.html

        Linux kernel updates:

        Version 2.51, Nov 17, 2001 (jgarzik):
        - Add ethtool support
        - Replace some MII-related magic numbers with constants

*/

#define DRV_NAME        "fealnx"
#define DRV_VERSION     "2.52"
#define DRV_RELDATE     "Sep-11-2006"

static int debug;               /* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// 88-12-9 modify,
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE   (TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE   (RX_RING_SIZE * sizeof(struct fealnx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
        KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";


/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 9 (0x200): full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
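
/*
 * Usage sketch (hypothetical values): the per-card 'options' and
 * 'full_duplex' arrays are taken in probe order, e.g.
 *
 *      modprobe fealnx options=0x204,0x204 full_duplex=1 debug=1
 *
 * The low four bits of each options[] entry select the media type and the
 * 0x200 bit requests full duplex (see the 'option & 0x200' test in
 * fealnx_init_one() below).
 */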

enum {
        MIN_REGION_SIZE = 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
        HAS_MII_XCVR,
        HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
        MysonPHY = 1,
        AhdocPHY = 2,
        SeeqPHY = 3,
        MarvellPHY = 4,
        Myson981 = 5,
        LevelOnePHY = 6,
        OtherPHY = 10,
};

struct chip_info {
        char *chip_name;
        int flags;
};

static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
        { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
        { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
        { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
        PAR0 = 0x0,             /* physical address 0-3 */
        PAR1 = 0x04,            /* physical address 4-5 */
        MAR0 = 0x08,            /* multicast address 0-3 */
        MAR1 = 0x0C,            /* multicast address 4-7 */
        FAR0 = 0x10,            /* flow-control address 0-3 */
        FAR1 = 0x14,            /* flow-control address 4-5 */
        TCRRCR = 0x18,          /* receive & transmit configuration */
        BCR = 0x1C,             /* bus command */
        TXPDR = 0x20,           /* transmit polling demand */
        RXPDR = 0x24,           /* receive polling demand */
        RXCWP = 0x28,           /* receive current word pointer */
        TXLBA = 0x2C,           /* transmit list base address */
        RXLBA = 0x30,           /* receive list base address */
        ISR = 0x34,             /* interrupt status */
        IMR = 0x38,             /* interrupt mask */
        FTH = 0x3C,             /* flow control high/low threshold */
        MANAGEMENT = 0x40,      /* bootrom/eeprom and mii management */
        TALLY = 0x44,           /* tally counters for crc and mpa */
        TSR = 0x48,             /* tally counter for transmit status */
        BMCRSR = 0x4c,          /* basic mode control and status */
        PHYIDENTIFIER = 0x50,   /* phy identifier */
        ANARANLPAR = 0x54,      /* auto-negotiation advertisement and link
                                   partner ability */
        ANEROCR = 0x58,         /* auto-negotiation expansion and pci conf. */
        BPREMRPSR = 0x5c,       /* bypass & receive error mask and phy status */
};

/* Bits in the interrupt status/enable registers, mostly interrupt sources. */
enum intr_status_bits {
        RFCON = 0x00020000,     /* receive flow control xon packet */
        RFCOFF = 0x00010000,    /* receive flow control xoff packet */
        LSCStatus = 0x00008000, /* link status change */
        ANCStatus = 0x00004000, /* autonegotiation completed */
        FBE = 0x00002000,       /* fatal bus error */
        FBEMask = 0x00001800,   /* mask bit12-11 */
        ParityErr = 0x00000000, /* parity error */
        TargetErr = 0x00001000, /* target abort */
        MasterErr = 0x00000800, /* master error */
        TUNF = 0x00000400,      /* transmit underflow */
        ROVF = 0x00000200,      /* receive overflow */
        ETI = 0x00000100,       /* transmit early int */
        ERI = 0x00000080,       /* receive early int */
        CNTOVF = 0x00000040,    /* counter overflow */
        RBU = 0x00000020,       /* receive buffer unavailable */
        TBU = 0x00000010,       /* transmit buffer unavailable */
        TI = 0x00000008,        /* transmit interrupt */
        RI = 0x00000004,        /* receive interrupt */
        RxErr = 0x00000002,     /* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
        CR_W_ENH = 0x02000000,  /* enhanced mode (name?) */
        CR_W_FD = 0x00100000,   /* full duplex */
        CR_W_PS10 = 0x00080000, /* 10 mbit */
        CR_W_TXEN = 0x00040000, /* tx enable (name?) */
        CR_W_PS1000 = 0x00010000,       /* 1000 mbit */
/*      CR_W_RXBURSTMASK = 0x00000e00, I'm unsure about this */
        CR_W_RXMODEMASK = 0x000000e0,
        CR_W_PROM = 0x00000080, /* promiscuous mode */
        CR_W_AB = 0x00000040,   /* accept broadcast */
        CR_W_AM = 0x00000020,   /* accept multicast */
        CR_W_ARP = 0x00000008,  /* receive runt pkt */
        CR_W_ALP = 0x00000004,  /* receive long pkt */
        CR_W_SEP = 0x00000002,  /* receive error pkt */
        CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */

        CR_R_TXSTOP = 0x04000000,       /* tx stopped (name?) */
        CR_R_FD = 0x00100000,   /* full duplex detected */
        CR_R_PS10 = 0x00080000, /* 10 mbit detected */
        CR_R_RXSTOP = 0x00008000,       /* rx stopped (name?) */
};

/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
        s32 status;
        s32 control;
        u32 buffer;
        u32 next_desc;
        struct fealnx_desc *next_desc_logical;
        struct sk_buff *skbuff;
        u32 reserved1;
        u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
        RXOWN = 0x80000000,     /* own bit */
        FLNGMASK = 0x0fff0000,  /* frame length */
        FLNGShift = 16,
        MARSTATUS = 0x00004000, /* multicast address received */
        BARSTATUS = 0x00002000, /* broadcast address received */
        PHYSTATUS = 0x00001000, /* physical address received */
        RXFSD = 0x00000800,     /* first descriptor */
        RXLSD = 0x00000400,     /* last descriptor */
        ErrorSummary = 0x80,    /* error summary */
        RUNT = 0x40,            /* runt packet received */
        LONG = 0x20,            /* long packet received */
        FAE = 0x10,             /* frame align error */
        CRC = 0x08,             /* crc error */
        RXER = 0x04,            /* receive error */
};

enum rx_desc_control_bits {
        RXIC = 0x00800000,      /* interrupt control */
        RBSShift = 0,
};

enum tx_desc_status_bits {
        TXOWN = 0x80000000,     /* own bit */
        JABTO = 0x00004000,     /* jabber timeout */
        CSL = 0x00002000,       /* carrier sense lost */
        LC = 0x00001000,        /* late collision */
        EC = 0x00000800,        /* excessive collision */
        UDF = 0x00000400,       /* fifo underflow */
        DFR = 0x00000200,       /* deferred */
        HF = 0x00000100,        /* heartbeat fail */
        NCRMask = 0x000000ff,   /* collision retry count */
        NCRShift = 0,
};

enum tx_desc_control_bits {
        TXIC = 0x80000000,      /* interrupt control */
        ETIControl = 0x40000000,        /* early transmit interrupt */
        TXLD = 0x20000000,      /* last descriptor */
        TXFD = 0x10000000,      /* first descriptor */
        CRCEnable = 0x08000000, /* crc control */
        PADEnable = 0x04000000, /* padding control */
        RetryTxLC = 0x02000000, /* retry late collision */
        PKTSMask = 0x3ff800,    /* packet size bit21-11 */
        PKTSShift = 11,
        TBSMask = 0x000007ff,   /* transmit buffer bit 10-0 */
        TBSShift = 0,
};

/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ      0x00000000
#define MASK_MIIR_MII_WRITE     0x00000008
#define MASK_MIIR_MII_MDO       0x00000004
#define MASK_MIIR_MII_MDI       0x00000002
#define MASK_MIIR_MII_MDC       0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ 0x6000          /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE 0x5002         /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
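
/*
 * Worked example (illustrative only): m80x_send_cmd_to_phy() below builds
 * the 16-bit management frame as opcode | (phyad << 7) | (regad << 2).
 * Reading register 1 of the PHY at address 5 therefore shifts out
 *
 *      OP_READ | (5 << 7) | (1 << 2) = 0x6284
 *
 * i.e. ST=01, OP=10, PHYAD=00101, REGAD=00001, with the bus released before
 * the turnaround bits so the PHY can drive MDIO during a read.
 */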

/* ------------------------------------------------------------------------- */
/* Constants for Myson PHY                                                   */
/* ------------------------------------------------------------------------- */
#define MysonPHYID      0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0     0x0302
#define StatusRegister  18
#define SPEED100        0x0400  // bit10
#define FULLMODE        0x0800  // bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/* Constants for Seeq 80225 PHY                                              */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0      0x0016

#define MIIRegister18   18
#define SPD_DET_100     0x80
#define DPLX_DET_FULL   0x40

/* ------------------------------------------------------------------------- */
/* Constants for Ahdoc 101 PHY                                               */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0     0x0022

#define DiagnosticReg   18
#define DPLX_FULL       0x0800
#define Speed_100       0x0400

/* 89/6/13 add, */
/* ------------------------------------------------------------------------- */
/* Constants for Marvell and LevelOne PHYs                                   */
/* ------------------------------------------------------------------------- */
#define MarvellPHYID0           0x0141
#define LevelOnePHYID0          0x0013

#define MII1000BaseTControlReg  9
#define MII1000BaseTStatusReg   10
#define SpecificReg             17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex  0x0200
#define PHYAbletoPerform1000HalfDuplex  0x0100
#define PHY1000AbilityMask              0x300

// for phy specific status register, marvell phy.
#define SpeedMask       0x0c000
#define Speed_1000M     0x08000
#define Speed_100M      0x4000
#define Speed_10M       0
#define Full_Duplex     0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M    0x08000
#define LXT1000_1000M   0x0c000
#define LXT1000_Full    0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2       0x00040000

/* for PHY */
#define LinkIsUp        0x0004


struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct fealnx_desc *rx_ring;
        struct fealnx_desc *tx_ring;

        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;

        spinlock_t lock;

        /* Media monitoring timer. */
        struct timer_list timer;

        /* Reset timer */
        struct timer_list reset_timer;
        int reset_timer_armed;
        unsigned long crvalue_sv;
        unsigned long imrvalue_sv;

        /* Frequently used values: keep some adjacent for cache effect. */
        int flags;
        struct pci_dev *pci_dev;
        unsigned long crvalue;
        unsigned long bcrvalue;
        unsigned long imrvalue;
        struct fealnx_desc *cur_rx;
        struct fealnx_desc *lack_rxbuf;
        int really_rx_count;
        struct fealnx_desc *cur_tx;
        struct fealnx_desc *cur_tx_copy;
        int really_tx_count;
        int free_tx_count;
        unsigned int rx_buf_sz; /* Based on MTU+slack. */

        /* These values keep track of the transceiver/media in use. */
        unsigned int linkok;
        unsigned int line_speed;
        unsigned int duplexmode;
        unsigned int default_port:4;    /* Last dev->if_port value. */
        unsigned int PHYType;

        /* MII transceiver section. */
        int mii_cnt;            /* number of MII PHYs found. */
        unsigned char phys[2];  /* MII device addresses. */
        struct mii_if_info mii;
        void __iomem *mem;
};
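
/*
 * Descriptor ownership sketch: both rings are singly linked loops, and the
 * RXOWN/TXOWN bit in ->status is the handoff.  The driver sets the own bit
 * to give a descriptor to the NIC and only reclaims it once the chip has
 * cleared the bit.  cur_rx/cur_tx track the next descriptor the driver will
 * service, cur_tx_copy the next free Tx slot, and lack_rxbuf remembers the
 * first Rx descriptor left without an skb after a failed allocation, so
 * allocate_rx_buffers() can retry from there.
 */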


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void fealnx_tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
        int delay = 0x1000;
        iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
        while (--delay) {
                if ((ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
                        break;
        }
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
        int delay = 0x1000;
        iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
        while (--delay) {
                if ((ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
                    == (CR_R_RXSTOP+CR_R_TXSTOP))
                        break;
        }
}

static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_get_stats          = get_stats,
        .ndo_set_multicast_list = set_rx_mode,
        .ndo_do_ioctl           = mii_ioctl,
        .ndo_tx_timeout         = fealnx_tx_timeout,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static int __devinit fealnx_init_one(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
{
        struct netdev_private *np;
        int i, option, err, irq;
        static int card_idx = -1;
        char boardname[12];
        void __iomem *ioaddr;
        unsigned long len;
        unsigned int chip_id = ent->driver_data;
        struct net_device *dev;
        void *ring_space;
        dma_addr_t ring_dma;
#ifdef USE_IO_OPS
        int bar = 0;
#else
        int bar = 1;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        card_idx++;
        sprintf(boardname, "fealnx%d", card_idx);

        option = card_idx < MAX_UNITS ? options[card_idx] : 0;

        i = pci_enable_device(pdev);
        if (i)
                return i;
        pci_set_master(pdev);

        len = pci_resource_len(pdev, bar);
        if (len < MIN_REGION_SIZE) {
                dev_err(&pdev->dev,
                        "region size %ld too small, aborting\n", len);
                return -ENODEV;
        }

        i = pci_request_regions(pdev, boardname);
        if (i)
                return i;

        irq = pdev->irq;

        ioaddr = pci_iomap(pdev, bar, len);
        if (!ioaddr) {
                err = -ENOMEM;
                goto err_out_res;
        }

        dev = alloc_etherdev(sizeof(struct netdev_private));
        if (!dev) {
                err = -ENOMEM;
                goto err_out_unmap;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* read ethernet id */
        for (i = 0; i < 6; ++i)
                dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

        /* Reset the chip to erase previous misconfiguration. */
        iowrite32(0x00000001, ioaddr + BCR);

        dev->base_addr = (unsigned long)ioaddr;
        dev->irq = irq;

        /* Make certain the descriptor lists are aligned. */
        np = netdev_priv(dev);
        np->mem = ioaddr;
        spin_lock_init(&np->lock);
        np->pci_dev = pdev;
        np->flags = skel_netdrv_tbl[chip_id].flags;
        pci_set_drvdata(pdev, dev);
        np->mii.dev = dev;
        np->mii.mdio_read = mdio_read;
        np->mii.mdio_write = mdio_write;
        np->mii.phy_id_mask = 0x1f;
        np->mii.reg_num_mask = 0x1f;

        ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space) {
                err = -ENOMEM;
                goto err_out_free_dev;
        }
        np->rx_ring = (struct fealnx_desc *)ring_space;
        np->rx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space) {
                err = -ENOMEM;
                goto err_out_free_rx;
        }
        np->tx_ring = (struct fealnx_desc *)ring_space;
        np->tx_ring_dma = ring_dma;

        /* find the connected MII xcvrs */
        if (np->flags == HAS_MII_XCVR) {
                int phy, phy_idx = 0;

                for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
                     phy++) {
                        int mii_status = mdio_read(dev, phy, 1);

                        if (mii_status != 0xffff && mii_status != 0x0000) {
                                np->phys[phy_idx++] = phy;
                                dev_info(&pdev->dev,
                                         "MII PHY found at address %d, status "
                                         "0x%4.4x.\n", phy, mii_status);
                                /* get phy type */
                                {
                                        unsigned int data;

                                        data = mdio_read(dev, np->phys[0], 2);
                                        if (data == SeeqPHYID0)
                                                np->PHYType = SeeqPHY;
                                        else if (data == AhdocPHYID0)
                                                np->PHYType = AhdocPHY;
                                        else if (data == MarvellPHYID0)
                                                np->PHYType = MarvellPHY;
                                        else if (data == MysonPHYID0)
                                                np->PHYType = Myson981;
                                        else if (data == LevelOnePHYID0)
                                                np->PHYType = LevelOnePHY;
                                        else
                                                np->PHYType = OtherPHY;
                                }
                        }
                }

                np->mii_cnt = phy_idx;
                if (phy_idx == 0)
                        dev_warn(&pdev->dev,
                                 "MII PHY not found -- this device may "
                                 "not operate correctly.\n");
        } else {
                np->phys[0] = 32;
/* 89/6/23 add, (begin) */
                /* get phy type */
                if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
                        np->PHYType = MysonPHY;
                else
                        np->PHYType = OtherPHY;
        }
        np->mii.phy_id = np->phys[0];

        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option > 0) {
                if (option & 0x200)
                        np->mii.full_duplex = 1;
                np->default_port = option & 15;
        }

        if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
                np->mii.full_duplex = full_duplex[card_idx];

        if (np->mii.full_duplex) {
                dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
//      if (np->PHYType==MarvellPHY)
                if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
                        unsigned int data;

                        data = mdio_read(dev, np->phys[0], 9);
                        data = (data & 0xfcff) | 0x0200;
                        mdio_write(dev, np->phys[0], 9, data);
                }
/* 89/6/13 add, (end) */
                if (np->flags == HAS_MII_XCVR)
                        mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
                else
                        iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
                np->mii.force_media = 1;
        }

        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        err = register_netdev(dev);
        if (err)
                goto err_out_free_tx;

        printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
               dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
               dev->dev_addr, irq);

        return 0;

err_out_free_tx:
        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_free_rx:
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_free_dev:
        free_netdev(dev);
err_out_unmap:
        pci_iounmap(pdev, ioaddr);
err_out_res:
        pci_release_regions(pdev);
        return err;
}


static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct netdev_private *np = netdev_priv(dev);

                pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
                        np->tx_ring_dma);
                pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
                        np->rx_ring_dma);
                unregister_netdev(dev);
                pci_iounmap(pdev, np->mem);
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_set_drvdata(pdev, NULL);
        } else
                printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
        ulong miir;
        int i;
        unsigned int mask, data;

        /* enable MII output */
        miir = (ulong) ioread32(miiport);
        miir &= 0xfffffff0;

        miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

        /* send 32 1's preamble */
        for (i = 0; i < 32; i++) {
                /* low MDC; MDO is already high (miir) */
                miir &= ~MASK_MIIR_MII_MDC;
                iowrite32(miir, miiport);

                /* high MDC */
                miir |= MASK_MIIR_MII_MDC;
                iowrite32(miir, miiport);
        }

        /* calculate ST+OP+PHYAD+REGAD+TA */
        data = opcode | (phyad << 7) | (regad << 2);

        /* send the frame out, MSB first */
        mask = 0x8000;
        while (mask) {
                /* low MDC, prepare MDO */
                miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
                if (mask & data)
                        miir |= MASK_MIIR_MII_MDO;

                iowrite32(miir, miiport);
                /* high MDC */
                miir |= MASK_MIIR_MII_MDC;
                iowrite32(miir, miiport);
                udelay(30);

                /* next */
                mask >>= 1;
                if (mask == 0x2 && opcode == OP_READ)
                        miir &= ~MASK_MIIR_MII_WRITE;
        }
        return miir;
}


static int mdio_read(struct net_device *dev, int phyad, int regad)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *miiport = np->mem + MANAGEMENT;
        ulong miir;
        unsigned int mask, data;

        miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

        /* read data */
        mask = 0x8000;
        data = 0;
        while (mask) {
                /* low MDC */
                miir &= ~MASK_MIIR_MII_MDC;
                iowrite32(miir, miiport);

                /* read MDI */
                miir = ioread32(miiport);
                if (miir & MASK_MIIR_MII_MDI)
                        data |= mask;

                /* high MDC, and wait */
                miir |= MASK_MIIR_MII_MDC;
                iowrite32(miir, miiport);
                udelay(30);

                /* next */
                mask >>= 1;
        }

        /* low MDC */
        miir &= ~MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

        return data & 0xffff;
}


static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *miiport = np->mem + MANAGEMENT;
        ulong miir;
        unsigned int mask;

        miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

        /* write data */
        mask = 0x8000;
        while (mask) {
                /* low MDC, prepare MDO */
                miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
                if (mask & data)
                        miir |= MASK_MIIR_MII_MDO;
                iowrite32(miir, miiport);

                /* high MDC */
                miir |= MASK_MIIR_MII_MDC;
                iowrite32(miir, miiport);

                /* next */
                mask >>= 1;
        }

        /* low MDC */
        miir &= ~MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);
}


static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
        int i;

        iowrite32(0x00000001, ioaddr + BCR);    /* Reset */

        if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
                return -EAGAIN;

        for (i = 0; i < 3; i++)
                iowrite16(((unsigned short*)dev->dev_addr)[i],
                                ioaddr + PAR0 + i*2);

        init_ring(dev);

        iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
        iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

        /* Initialize other registers. */

        np->bcrvalue = 0x10;    /* little-endian, 8 burst length */
#ifdef __BIG_ENDIAN
        np->bcrvalue |= 0x04;   /* big-endian */
#endif

#if defined(__i386__) && !defined(MODULE)
        if (boot_cpu_data.x86 <= 4)
                np->crvalue = 0xa00;
        else
#endif
                np->crvalue = 0xe00;    /* rx 128 burst length */


// 89/12/29 add,
// 90/1/16 modify,
//   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
        np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
        if (np->pci_dev->device == 0x891) {
                np->bcrvalue |= 0x200;  /* set PROG bit */
                np->crvalue |= CR_W_ENH;        /* set enhanced bit */
                np->imrvalue |= ETI;
        }
        iowrite32(np->bcrvalue, ioaddr + BCR);

        if (dev->if_port == 0)
                dev->if_port = np->default_port;

        iowrite32(0, ioaddr + RXPDR);
// 89/9/1 modify,
//   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
        np->crvalue |= 0x00e40001;      /* tx store and forward, tx/rx enable */
        np->mii.full_duplex = np->mii.force_media;
        getlinkstatus(dev);
        if (np->linkok)
                getlinktype(dev);
        __set_rx_mode(dev);

        netif_start_queue(dev);

        /* Clear and Enable interrupts by setting the interrupt mask. */
        iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
        iowrite32(np->imrvalue, ioaddr + IMR);

        if (debug)
                printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

        /* Set the timer to check for link beat. */
        init_timer(&np->timer);
        np->timer.expires = RUN_AT(3 * HZ);
        np->timer.data = (unsigned long) dev;
        np->timer.function = &netdev_timer;

        /* timer handler */
        add_timer(&np->timer);

        init_timer(&np->reset_timer);
        np->reset_timer.data = (unsigned long) dev;
        np->reset_timer.function = &reset_timer;
        np->reset_timer_armed = 0;

        return 0;
}


static void getlinkstatus(struct net_device *dev)
/* function: Read the link status, either from the chip (3-in-1 case) or    */
/*           from the MII status register, polling until the link comes up. */
/* input   : dev... pointer to the adapter block.                           */
/* output  : none.                                                          */
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned int i, DelayTime = 0x1000;

        np->linkok = 0;

        if (np->PHYType == MysonPHY) {
                for (i = 0; i < DelayTime; ++i) {
                        if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
                                np->linkok = 1;
                                return;
                        }
                        udelay(100);
                }
        } else {
                for (i = 0; i < DelayTime; ++i) {
                        if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
                                np->linkok = 1;
                                return;
                        }
                        udelay(100);
                }
        }
}


static void getlinktype(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        if (np->PHYType == MysonPHY) {  /* 3-in-1 case */
                if (ioread32(np->mem + TCRRCR) & CR_R_FD)
                        np->duplexmode = 2;     /* full duplex */
                else
                        np->duplexmode = 1;     /* half duplex */
                if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
                        np->line_speed = 1;     /* 10M */
                else
                        np->line_speed = 2;     /* 100M */
        } else {
                if (np->PHYType == SeeqPHY) {   /* this PHY is SEEQ 80225 */
                        unsigned int data;

                        data = mdio_read(dev, np->phys[0], MIIRegister18);
                        if (data & SPD_DET_100)
                                np->line_speed = 2;     /* 100M */
                        else
                                np->line_speed = 1;     /* 10M */
                        if (data & DPLX_DET_FULL)
                                np->duplexmode = 2;     /* full duplex mode */
                        else
                                np->duplexmode = 1;     /* half duplex mode */
                } else if (np->PHYType == AhdocPHY) {
                        unsigned int data;

                        data = mdio_read(dev, np->phys[0], DiagnosticReg);
                        if (data & Speed_100)
                                np->line_speed = 2;     /* 100M */
                        else
                                np->line_speed = 1;     /* 10M */
                        if (data & DPLX_FULL)
                                np->duplexmode = 2;     /* full duplex mode */
                        else
                                np->duplexmode = 1;     /* half duplex mode */
                }
/* 89/6/13 add, (begin) */
                else if (np->PHYType == MarvellPHY) {
                        unsigned int data;

                        data = mdio_read(dev, np->phys[0], SpecificReg);
                        if (data & Full_Duplex)
                                np->duplexmode = 2;     /* full duplex mode */
                        else
                                np->duplexmode = 1;     /* half duplex mode */
                        data &= SpeedMask;
                        if (data == Speed_1000M)
                                np->line_speed = 3;     /* 1000M */
                        else if (data == Speed_100M)
                                np->line_speed = 2;     /* 100M */
                        else
                                np->line_speed = 1;     /* 10M */
                }
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
                else if (np->PHYType == Myson981) {
                        unsigned int data;

                        data = mdio_read(dev, np->phys[0], StatusRegister);

                        if (data & SPEED100)
                                np->line_speed = 2;
                        else
                                np->line_speed = 1;

                        if (data & FULLMODE)
                                np->duplexmode = 2;
                        else
                                np->duplexmode = 1;
                }
/* 89/7/27 add, (end) */
/* 89/12/29 add */
                else if (np->PHYType == LevelOnePHY) {
                        unsigned int data;

                        data = mdio_read(dev, np->phys[0], SpecificReg);
                        if (data & LXT1000_Full)
                                np->duplexmode = 2;     /* full duplex mode */
                        else
                                np->duplexmode = 1;     /* half duplex mode */
                        data &= SpeedMask;
                        if (data == LXT1000_1000M)
                                np->line_speed = 3;     /* 1000M */
                        else if (data == LXT1000_100M)
                                np->line_speed = 2;     /* 100M */
                        else
                                np->line_speed = 1;     /* 10M */
                }
                np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
                if (np->line_speed == 1)
                        np->crvalue |= CR_W_PS10;
                else if (np->line_speed == 3)
                        np->crvalue |= CR_W_PS1000;
                if (np->duplexmode == 2)
                        np->crvalue |= CR_W_FD;
        }
}
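
/*
 * Summary of the encoding getlinktype() leaves behind (derived from the
 * assignments above): np->line_speed is 1 for 10M, 2 for 100M, 3 for 1000M;
 * np->duplexmode is 1 for half and 2 for full duplex.  For the non-Myson
 * PHYs the result is folded into np->crvalue via CR_W_PS10, CR_W_PS1000 and
 * CR_W_FD before it is written to TCRRCR.
 */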

/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        /* allocate skb for rx buffers */
        while (np->really_rx_count != RX_RING_SIZE) {
                struct sk_buff *skb;

                skb = dev_alloc_skb(np->rx_buf_sz);
                if (skb == NULL)
                        break;  /* Better luck next round. */

                while (np->lack_rxbuf->skbuff)
                        np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

                skb->dev = dev; /* Mark as being used by this device. */
                np->lack_rxbuf->skbuff = skb;
                np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                np->lack_rxbuf->status = RXOWN;
                ++np->really_rx_count;
        }
}


static void netdev_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *) data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
        int old_crvalue = np->crvalue;
        unsigned int old_linkok = np->linkok;
        unsigned long flags;

        if (debug)
                printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
                       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
                       ioread32(ioaddr + TCRRCR));

        spin_lock_irqsave(&np->lock, flags);

        if (np->flags == HAS_MII_XCVR) {
                getlinkstatus(dev);
                if ((old_linkok == 0) && (np->linkok == 1)) {   /* we need to detect the media type again */
                        getlinktype(dev);
                        if (np->crvalue != old_crvalue) {
                                stop_nic_rxtx(ioaddr, np->crvalue);
                                iowrite32(np->crvalue, ioaddr + TCRRCR);
                        }
                }
        }

        allocate_rx_buffers(dev);

        spin_unlock_irqrestore(&np->lock, flags);

        np->timer.expires = RUN_AT(10 * HZ);
        add_timer(&np->timer);
}


/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
        int delay = 51;

        /* Reset the chip's Tx and Rx processes. */
        stop_nic_rxtx(ioaddr, 0);

        /* Disable interrupts by clearing the interrupt mask. */
        iowrite32(0, ioaddr + IMR);

        /* Reset the chip to erase previous misconfiguration. */
        iowrite32(0x00000001, ioaddr + BCR);

        /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
           We surely wait too long (address+data phase). Who cares? */
        while (--delay) {
                ioread32(ioaddr + BCR);
                rmb();
        }
}


/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;

        reset_rx_descriptors(dev);

        iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
                ioaddr + TXLBA);
        iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
                ioaddr + RXLBA);

        iowrite32(np->bcrvalue, ioaddr + BCR);

        iowrite32(0, ioaddr + RXPDR);
        __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */

        /* Clear and Enable interrupts by setting the interrupt mask. */
        iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
        iowrite32(np->imrvalue, ioaddr + IMR);

        iowrite32(0, ioaddr + TXPDR);
}


static void reset_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *) data;
        struct netdev_private *np = netdev_priv(dev);
        unsigned long flags;

        printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

        spin_lock_irqsave(&np->lock, flags);
        np->crvalue = np->crvalue_sv;
        np->imrvalue = np->imrvalue_sv;

        reset_and_disable_rxtx(dev);
        /* works for me without this:
        reset_tx_descriptors(dev); */
        enable_rxtx(dev);
        netif_start_queue(dev);

        np->reset_timer_armed = 0;

        spin_unlock_irqrestore(&np->lock, flags);
}
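
/*
 * Note on the deferred reset scheme: when intr_handler() exhausts its
 * max_interrupt_work budget it saves crvalue/imrvalue, quiets the NIC and
 * arms this half-second one-shot instead of resetting inline; reset_timer()
 * then restores the saved state, resets the chip and restarts the queue
 * outside interrupt context.
 */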

static void fealnx_tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
        unsigned long flags;
        int i;

        printk(KERN_WARNING
               "%s: Transmit timed out, status %8.8x, resetting...\n",
               dev->name, ioread32(ioaddr + ISR));

        {
                printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
                for (i = 0; i < RX_RING_SIZE; i++)
                        printk(KERN_CONT " %8.8x",
                               (unsigned int) np->rx_ring[i].status);
                printk(KERN_CONT "\n");
                printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
                printk(KERN_CONT "\n");
        }

        spin_lock_irqsave(&np->lock, flags);

        reset_and_disable_rxtx(dev);
        reset_tx_descriptors(dev);
        enable_rxtx(dev);

        spin_unlock_irqrestore(&np->lock, flags);

        dev->trans_start = jiffies; /* prevent tx timeout */
        dev->stats.tx_errors++;
        netif_wake_queue(dev); /* or .._start_.. ?? */
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        /* initialize rx variables */
        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
        np->cur_rx = &np->rx_ring[0];
        np->lack_rxbuf = np->rx_ring;
        np->really_rx_count = 0;

        /* initialize the rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].status = 0;
                np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
                np->rx_ring[i].next_desc = np->rx_ring_dma +
                        (i + 1)*sizeof(struct fealnx_desc);
                np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
                np->rx_ring[i].skbuff = NULL;
        }

        /* for the last rx descriptor */
        np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
        np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

        /* allocate skb for rx buffers */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

                if (skb == NULL) {
                        np->lack_rxbuf = &np->rx_ring[i];
                        break;
                }

                ++np->really_rx_count;
                np->rx_ring[i].skbuff = skb;
                skb->dev = dev; /* Mark as being used by this device. */
                np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                np->rx_ring[i].status = RXOWN;
                np->rx_ring[i].control |= RXIC;
        }

        /* initialize tx variables */
        np->cur_tx = &np->tx_ring[0];
        np->cur_tx_copy = &np->tx_ring[0];
        np->really_tx_count = 0;
        np->free_tx_count = TX_RING_SIZE;

        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_ring[i].status = 0;
                np->tx_ring[i].next_desc = np->tx_ring_dma +
                        (i + 1)*sizeof(struct fealnx_desc);
                np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
                np->tx_ring[i].skbuff = NULL;
        }

        /* for the last tx descriptor */
        np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
        np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}


static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&np->lock, flags);

        np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
        np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                skb->len, PCI_DMA_TODEVICE);
        np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
        np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
        np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */
// 89/12/29 add,
        if (np->pci_dev->device == 0x891)
                np->cur_tx_copy->control |= ETIControl | RetryTxLC;
        np->cur_tx_copy->status = TXOWN;
        np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
        --np->free_tx_count;
#elif defined(two_buffer)
        if (skb->len > BPT) {
                struct fealnx_desc *next;

                /* for the first descriptor */
                np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                        BPT, PCI_DMA_TODEVICE);
                np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
                np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
                np->cur_tx_copy->control |= (BPT << TBSShift);  /* buffer size */

                /* for the last descriptor */
                next = np->cur_tx_copy->next_desc_logical;
                next->skbuff = skb;
                next->control = TXIC | TXLD | CRCEnable | PADEnable;
                next->control |= (skb->len << PKTSShift);       /* pkt size */
                next->control |= ((skb->len - BPT) << TBSShift);        /* buf size */
// 89/12/29 add,
                if (np->pci_dev->device == 0x891)
                        np->cur_tx_copy->control |= ETIControl | RetryTxLC;
                next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
                        skb->len - BPT, PCI_DMA_TODEVICE);

                next->status = TXOWN;
                np->cur_tx_copy->status = TXOWN;

                np->cur_tx_copy = next->next_desc_logical;
                np->free_tx_count -= 2;
        } else {
                np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                        skb->len, PCI_DMA_TODEVICE);
                np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
                np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
                np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */
// 89/12/29 add,
                if (np->pci_dev->device == 0x891)
                        np->cur_tx_copy->control |= ETIControl | RetryTxLC;
                np->cur_tx_copy->status = TXOWN;
                np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
                --np->free_tx_count;
        }
#endif

        if (np->free_tx_count < 2)
                netif_stop_queue(dev);
        ++np->really_tx_count;
        iowrite32(0, np->mem + TXPDR);

        spin_unlock_irqrestore(&np->lock, flags);
        return NETDEV_TX_OK;
}
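
/*
 * Worked example (illustrative only): in the default one_buffer path above,
 * a 60-byte frame yields
 *
 *      control = TXIC | TXLD | TXFD | CRCEnable | PADEnable
 *              | (60 << PKTSShift) | (60 << TBSShift)
 *              = 0xbc000000 | 0x0001e000 | 0x0000003c = 0xbc01e03c
 *
 * i.e. a single first+last descriptor with packet size and buffer size both
 * 60; setting status = TXOWN afterwards hands it to the NIC.
 */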

/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        struct fealnx_desc *cur;
        int i;

        /* initialize tx variables */
        np->cur_tx = &np->tx_ring[0];
        np->cur_tx_copy = &np->tx_ring[0];
        np->really_tx_count = 0;
        np->free_tx_count = TX_RING_SIZE;

        for (i = 0; i < TX_RING_SIZE; i++) {
                cur = &np->tx_ring[i];
                if (cur->skbuff) {
                        pci_unmap_single(np->pci_dev, cur->buffer,
                                cur->skbuff->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(cur->skbuff);
                        cur->skbuff = NULL;
                }
                cur->status = 0;
                cur->control = 0;       /* needed? */
                /* probably not needed. We do it for purely paranoid reasons */
                cur->next_desc = np->tx_ring_dma +
                        (i + 1)*sizeof(struct fealnx_desc);
                cur->next_desc_logical = &np->tx_ring[i + 1];
        }
        /* for the last tx descriptor */
        np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
        np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}


/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        struct fealnx_desc *cur = np->cur_rx;
        int i;

        allocate_rx_buffers(dev);

        for (i = 0; i < RX_RING_SIZE; i++) {
                if (cur->skbuff)
                        cur->status = RXOWN;
                cur = cur->next_desc_logical;
        }

        iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
                np->mem + RXLBA);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *) dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
        long boguscnt = max_interrupt_work;
        unsigned int num_tx = 0;
        int handled = 0;

        spin_lock(&np->lock);

        iowrite32(0, ioaddr + IMR);

        do {
                u32 intr_status = ioread32(ioaddr + ISR);

                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(intr_status, ioaddr + ISR);

                if (debug)
                        printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
                               intr_status);

                if (!(intr_status & np->imrvalue))
                        break;

                handled = 1;

// 90/1/16 delete,
//
//      if (intr_status & FBE)
//      {   /* fatal error */
//          stop_nic_tx(ioaddr, 0);
//          stop_nic_rx(ioaddr, 0);
//          break;
//      };

                if (intr_status & TUNF)
                        iowrite32(0, ioaddr + TXPDR);

                if (intr_status & CNTOVF) {
                        /* missed pkts */
                        dev->stats.rx_missed_errors +=
                                ioread32(ioaddr + TALLY) & 0x7fff;

                        /* crc error */
                        dev->stats.rx_crc_errors +=
                                (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
                }

                if (intr_status & (RI | RBU)) {
                        if (intr_status & RI)
                                netdev_rx(dev);
                        else {
                                stop_nic_rx(ioaddr, np->crvalue);
                                reset_rx_descriptors(dev);
                                iowrite32(np->crvalue, ioaddr + TCRRCR);
                        }
                }

                while (np->really_tx_count) {
                        long tx_status = np->cur_tx->status;
                        long tx_control = np->cur_tx->control;

                        if (!(tx_control & TXLD)) {     /* this pkt is combined by two tx descriptors */
                                struct fealnx_desc *next;

                                next = np->cur_tx->next_desc_logical;
                                tx_status = next->status;
                                tx_control = next->control;
                        }

                        if (tx_status & TXOWN)
                                break;

                        if (!(np->crvalue & CR_W_ENH)) {
                                if (tx_status & (CSL | LC | EC | UDF | HF)) {
                                        dev->stats.tx_errors++;
                                        if (tx_status & EC)
                                                dev->stats.tx_aborted_errors++;
                                        if (tx_status & CSL)
                                                dev->stats.tx_carrier_errors++;
                                        if (tx_status & LC)
                                                dev->stats.tx_window_errors++;
                                        if (tx_status & UDF)
                                                dev->stats.tx_fifo_errors++;
                                        if ((tx_status & HF) && np->mii.full_duplex == 0)
                                                dev->stats.tx_heartbeat_errors++;
                                } else {
                                        dev->stats.tx_bytes +=
                                                ((tx_control & PKTSMask) >> PKTSShift);

                                        dev->stats.collisions +=
                                                ((tx_status & NCRMask) >> NCRShift);
                                        dev->stats.tx_packets++;
                                }
                        } else {
                                dev->stats.tx_bytes +=
                                        ((tx_control & PKTSMask) >> PKTSShift);
                                dev->stats.tx_packets++;
                        }

                        /* Free the original skb. */
                        pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
                                np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_irq(np->cur_tx->skbuff);
                        np->cur_tx->skbuff = NULL;
                        --np->really_tx_count;
                        if (np->cur_tx->control & TXLD) {
                                np->cur_tx = np->cur_tx->next_desc_logical;
                                ++np->free_tx_count;
                        } else {
                                np->cur_tx = np->cur_tx->next_desc_logical;
                                np->cur_tx = np->cur_tx->next_desc_logical;
                                np->free_tx_count += 2;
                        }
                        num_tx++;
                }               /* end of tx-cleanup loop */

                if (num_tx && np->free_tx_count >= 2)
                        netif_wake_queue(dev);

                /* read transmit status for enhanced mode only */
                if (np->crvalue & CR_W_ENH) {
                        long data;

                        data = ioread32(ioaddr + TSR);
                        dev->stats.tx_errors += (data & 0xff000000) >> 24;
                        dev->stats.tx_aborted_errors +=
                                (data & 0xff000000) >> 24;
                        dev->stats.tx_window_errors +=
                                (data & 0x00ff0000) >> 16;
                        dev->stats.collisions += (data & 0x0000ffff);
                }

                if (--boguscnt < 0) {
                        printk(KERN_WARNING "%s: Too much work at interrupt, "
                               "status=0x%4.4x.\n", dev->name, intr_status);
                        if (!np->reset_timer_armed) {
                                np->reset_timer_armed = 1;
                                np->reset_timer.expires = RUN_AT(HZ/2);
                                add_timer(&np->reset_timer);
                                stop_nic_rxtx(ioaddr, 0);
                                netif_stop_queue(dev);
                                /* or netif_tx_disable(dev); ?? */
                                /* Prevent other paths from enabling tx,rx,intrs */
                                np->crvalue_sv = np->crvalue;
                                np->imrvalue_sv = np->imrvalue;
                                np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
                                np->imrvalue = 0;
                        }

                        break;
                }
        } while (1);

        /* read the tally counters */
        /* missed pkts */
        dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

        /* crc error */
        dev->stats.rx_crc_errors +=
                (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

        if (debug)
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
                       dev->name, ioread32(ioaddr + ISR));

        iowrite32(np->imrvalue, ioaddr + IMR);

        spin_unlock(&np->lock);

        return IRQ_RETVAL(handled);
}
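
/*
 * Receive strategy in netdev_rx() below: frames shorter than rx_copybreak
 * are copied into a freshly allocated skb so the (large) mapped ring buffer
 * can be reused immediately; longer frames hand the ring skb itself up the
 * stack and leave the hole for allocate_rx_buffers() to refill.
 */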

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
                s32 rx_status = np->cur_rx->status;

                if (np->really_rx_count == 0)
                        break;

                if (debug)
                        printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

                if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
                    (rx_status & ErrorSummary)) {
                        if (rx_status & ErrorSummary) { /* there was a fatal error */
                                if (debug)
                                        printk(KERN_DEBUG
                                               "%s: Receive error, Rx status %8.8x.\n",
                                               dev->name, rx_status);

                                dev->stats.rx_errors++; /* end of a packet. */
                                if (rx_status & (LONG | RUNT))
                                        dev->stats.rx_length_errors++;
                                if (rx_status & RXER)
                                        dev->stats.rx_frame_errors++;
                                if (rx_status & CRC)
                                        dev->stats.rx_crc_errors++;
                        } else {
                                int need_to_reset = 0;
                                int desno = 0;

                                if (rx_status & RXFSD) {        /* this pkt is too long, over one rx buffer */
                                        struct fealnx_desc *cur;

                                        /* check whether this packet was received completely */
                                        cur = np->cur_rx;
                                        while (desno <= np->really_rx_count) {
                                                ++desno;
                                                if ((!(cur->status & RXOWN)) &&
                                                    (cur->status & RXLSD))
                                                        break;
                                                /* goto next rx descriptor */
                                                cur = cur->next_desc_logical;
                                        }
                                        if (desno > np->really_rx_count)
                                                need_to_reset = 1;
                                } else  /* no RXLSD found, something is wrong */
                                        need_to_reset = 1;

                                if (need_to_reset == 0) {
                                        int i;

                                        dev->stats.rx_length_errors++;

                                        /* free all rx descriptors related to this long pkt */
                                        for (i = 0; i < desno; ++i) {
                                                if (!np->cur_rx->skbuff) {
                                                        printk(KERN_DEBUG
                                                                "%s: I'm scared\n", dev->name);
                                                        break;
                                                }
                                                np->cur_rx->status = RXOWN;
                                                np->cur_rx = np->cur_rx->next_desc_logical;
                                        }
                                        continue;
                                } else {        /* rx error, need to reset this chip */
                                        stop_nic_rx(ioaddr, np->crvalue);
                                        reset_rx_descriptors(dev);
                                        iowrite32(np->crvalue, ioaddr + TCRRCR);
                                }
                                break;  /* exit the while loop */
                        }
                } else {        /* this received pkt is ok */

                        struct sk_buff *skb;
                        /* Omit the four octet CRC from the length. */
                        short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
                        if (debug)
                                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
                                       " status %x.\n", pkt_len, rx_status);
#endif

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,
                                                            np->cur_rx->buffer,
                                                            np->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
                                /* Call copy + cksum if available. */

#if !defined(__alpha__)
                                skb_copy_to_linear_data(skb,
                                        np->cur_rx->skbuff->data, pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                        np->cur_rx->skbuff->data, pkt_len);
#endif
                                pci_dma_sync_single_for_device(np->pci_dev,
                                                               np->cur_rx->buffer,
                                                               np->rx_buf_sz,
                                                               PCI_DMA_FROMDEVICE);
                        } else {
                                pci_unmap_single(np->pci_dev,
                                                 np->cur_rx->buffer,
                                                 np->rx_buf_sz,
                                                 PCI_DMA_FROMDEVICE);
                                skb = np->cur_rx->skbuff;
                                skb_put(skb, pkt_len);
                                np->cur_rx->skbuff = NULL;
                                --np->really_rx_count;
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }

                np->cur_rx = np->cur_rx->next_desc_logical;
        }                       /* end of while loop */

        /* allocate skb for rx buffers */
        allocate_rx_buffers(dev);

        return 0;
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;

        /* The chip only needs to report frames it silently dropped. */
        if (netif_running(dev)) {
                dev->stats.rx_missed_errors +=
                        ioread32(ioaddr + TALLY) & 0x7fff;
                dev->stats.rx_crc_errors +=
                        (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
        }

        return &dev->stats;
}


/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
{
        spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
        unsigned long flags;
        spin_lock_irqsave(lp, flags);
        __set_rx_mode(dev);
        spin_unlock_irqrestore(lp, flags);
}


/* Take lock before calling */
static void __set_rx_mode(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
        u32 mc_filter[2];       /* Multicast hash filter */
        u32 rx_mode;

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = CR_W_AB | CR_W_AM;
        } else {
                struct netdev_hw_addr *ha;

                memset(mc_filter, 0, sizeof(mc_filter));
                netdev_for_each_mc_addr(ha, dev) {
                        unsigned int bit;
                        bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
                        /* mask the shift count; '1 << bit' is undefined for bit >= 32 */
                        mc_filter[bit >> 5] |= (1 << (bit & 31));
                }
                rx_mode = CR_W_AB | CR_W_AM;
        }

        stop_nic_rxtx(ioaddr, np->crvalue);

        iowrite32(mc_filter[0], ioaddr + MAR0);
        iowrite32(mc_filter[1], ioaddr + MAR1);
        np->crvalue &= ~CR_W_RXMODEMASK;
        np->crvalue |= rx_mode;
        iowrite32(np->crvalue, ioaddr + TCRRCR);
}
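
/*
 * Hash example (illustrative only): for a multicast address whose Ethernet
 * CRC is 0xfc000000, __set_rx_mode() above computes
 *
 *      bit = (0xfc000000 >> 26) ^ 0x3F = 0x3F ^ 0x3F = 0
 *
 * so bit 0 of mc_filter[0] (register MAR0) is set; bit values 32-63 land in
 * mc_filter[1] (MAR1).
 */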

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct netdev_private *np = netdev_priv(dev);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        int rc;

        spin_lock_irq(&np->lock);
        rc = mii_ethtool_gset(&np->mii, cmd);
        spin_unlock_irq(&np->lock);

        return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        int rc;

        spin_lock_irq(&np->lock);
        rc = mii_ethtool_sset(&np->mii, cmd);
        spin_unlock_irq(&np->lock);

        return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
        return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
        debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
        .get_settings           = netdev_get_settings,
        .set_settings           = netdev_set_settings,
        .nway_reset             = netdev_nway_reset,
        .get_link               = netdev_get_link,
        .get_msglevel           = netdev_get_msglevel,
        .set_msglevel           = netdev_set_msglevel,
};
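
/*
 * Usage sketch (hypothetical interface name): with the ops above, e.g.
 *
 *      ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * lands in netdev_set_settings() -> mii_ethtool_sset(), which programs the
 * PHY through mdio_write(); 'ethtool eth0' reads back via mii_ethtool_gset().
 */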

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irq(&np->lock);
        rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
        spin_unlock_irq(&np->lock);

        return rc;
}


static int netdev_close(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->mem;
        int i;

        netif_stop_queue(dev);

        /* Disable interrupts by clearing the interrupt mask. */
        iowrite32(0x0000, ioaddr + IMR);

        /* Stop the chip's Tx and Rx processes. */
        stop_nic_rxtx(ioaddr, 0);

        del_timer_sync(&np->timer);
        del_timer_sync(&np->reset_timer);

        free_irq(dev->irq, dev);

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = np->rx_ring[i].skbuff;

                np->rx_ring[i].status = 0;
                if (skb) {
                        pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
                                np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(skb);
                        np->rx_ring[i].skbuff = NULL;
                }
        }

        for (i = 0; i < TX_RING_SIZE; i++) {
                struct sk_buff *skb = np->tx_ring[i].skbuff;

                if (skb) {
                        pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
                                skb->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb(skb);
                        np->tx_ring[i].skbuff = NULL;
                }
        }

        return 0;
}

static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
        {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
        {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
        {} /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
        .name           = "fealnx",
        .id_table       = fealnx_pci_tbl,
        .probe          = fealnx_init_one,
        .remove         = __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
        printk(version);
#endif

        return pci_register_driver(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
        pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);