1/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */ 2/* 3 Written/copyright 1999-2001 by Donald Becker. 4 Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) 5 Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) 6 Portions copyright 2004 Harald Welte <laforge@gnumonks.org> 7 8 This software may be used and distributed according to the terms of 9 the GNU General Public License (GPL), incorporated herein by reference. 10 Drivers based on or derived from this code fall under the GPL and must 11 retain the authorship, copyright and license notice. This file is not 12 a complete program and may only be used when the entire operating 13 system is licensed under the GPL. License for under other terms may be 14 available. Contact the original author for details. 15 16 The original author may be reached as becker@scyld.com, or at 17 Scyld Computing Corporation 18 410 Severn Ave., Suite 210 19 Annapolis MD 21403 20 21 Support information and updates available at 22 http://www.scyld.com/network/netsemi.html 23 [link no longer provides useful info -jgarzik] 24 25 26 TODO: 27 * big endian support with CFG:BEM instead of cpu_to_le32 28*/ 29 30#include <linux/module.h> 31#include <linux/kernel.h> 32#include <linux/string.h> 33#include <linux/timer.h> 34#include <linux/errno.h> 35#include <linux/ioport.h> 36#include <linux/slab.h> 37#include <linux/interrupt.h> 38#include <linux/pci.h> 39#include <linux/netdevice.h> 40#include <linux/etherdevice.h> 41#include <linux/skbuff.h> 42#include <linux/init.h> 43#include <linux/spinlock.h> 44#include <linux/ethtool.h> 45#include <linux/delay.h> 46#include <linux/rtnetlink.h> 47#include <linux/mii.h> 48#include <linux/crc32.h> 49#include <linux/bitops.h> 50#include <linux/prefetch.h> 51#include <asm/processor.h> /* Processor type for cache alignment. 
*/ 52#include <asm/io.h> 53#include <asm/irq.h> 54#include <asm/uaccess.h> 55 56#define DRV_NAME "natsemi" 57#define DRV_VERSION "2.1" 58#define DRV_RELDATE "Sept 11, 2006" 59 60#define RX_OFFSET 2 61 62/* Updated to recommendations in pci-skeleton v2.03. */ 63 64/* The user-configurable values. 65 These may be modified when a driver module is loaded.*/ 66 67#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \ 68 NETIF_MSG_LINK | \ 69 NETIF_MSG_WOL | \ 70 NETIF_MSG_RX_ERR | \ 71 NETIF_MSG_TX_ERR) 72static int debug = -1; 73 74static int mtu; 75 76/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). 77 This chip uses a 512 element hash table based on the Ethernet CRC. */ 78static const int multicast_filter_limit = 100; 79 80/* Set the copy breakpoint for the copy-only-tiny-frames scheme. 81 Setting to > 1518 effectively disables this feature. */ 82static int rx_copybreak; 83 84static int dspcfg_workaround = 1; 85 86/* Used to pass the media type, etc. 87 Both 'options[]' and 'full_duplex[]' should exist for driver 88 interoperability. 89 The media type is usually passed in 'options[]'. 90*/ 91#define MAX_UNITS 8 /* More are supported, limit only on options */ 92static int options[MAX_UNITS]; 93static int full_duplex[MAX_UNITS]; 94 95/* Operational parameters that are set at compile time. */ 96 97/* Keep the ring sizes a power of two for compile efficiency. 98 The compiler will convert <unsigned>'%'<2^N> into a bit mask. 99 Making the Tx ring too large decreases the effectiveness of channel 100 bonding and packet priority. 101 There are no ill effects from too-large receive rings. */ 102#define TX_RING_SIZE 16 103#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */ 104#define RX_RING_SIZE 32 105 106/* Operational parameters that usually are not changed. */ 107/* Time in jiffies before concluding the transmitter is hung. 
*/ 108#define TX_TIMEOUT (2*HZ) 109 110#define NATSEMI_HW_TIMEOUT 400 111#define NATSEMI_TIMER_FREQ 3*HZ 112#define NATSEMI_PG0_NREGS 64 113#define NATSEMI_RFDR_NREGS 8 114#define NATSEMI_PG1_NREGS 4 115#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \ 116 NATSEMI_PG1_NREGS) 117#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */ 118#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32)) 119 120/* Buffer sizes: 121 * The nic writes 32-bit values, even if the upper bytes of 122 * a 32-bit value are beyond the end of the buffer. 123 */ 124#define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */ 125#define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */ 126#define NATSEMI_LONGPKT 1518 /* limit for normal packets */ 127#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */ 128 129/* These identify the driver base version and may not be removed. */ 130static const char version[] __devinitdata = 131 KERN_INFO DRV_NAME " dp8381x driver, version " 132 DRV_VERSION ", " DRV_RELDATE "\n" 133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" 134 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; 135 136MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 137MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); 138MODULE_LICENSE("GPL"); 139 140module_param(mtu, int, 0); 141module_param(debug, int, 0); 142module_param(rx_copybreak, int, 0); 143module_param(dspcfg_workaround, int, 1); 144module_param_array(options, int, NULL, 0); 145module_param_array(full_duplex, int, NULL, 0); 146MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); 147MODULE_PARM_DESC(debug, "DP8381x default debug level"); 148MODULE_PARM_DESC(rx_copybreak, 149 "DP8381x copy breakpoint for copy-only-tiny-frames"); 150MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround"); 151MODULE_PARM_DESC(options, 152 "DP8381x: Bits 0-3: media type, bit 17: full duplex"); 153MODULE_PARM_DESC(full_duplex, "DP8381x full duplex 
setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
It also works with other chips in the DP83810 series.

II. Board-specific settings

This driver requires the PCI interrupt line to be valid.
It honors the EEPROM-set values.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
The NatSemi design uses a 'next descriptor' pointer that the driver forms
into a list.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
197 198A subtle aspect of the operation is that unaligned buffers are not permitted 199by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't 200longword aligned for further processing. On copies frames are put into the 201skbuff at an offset of "+2", 16-byte aligning the IP header. 202 203IIId. Synchronization 204 205Most operations are synchronized on the np->lock irq spinlock, except the 206performance critical codepaths: 207 208The rx process only runs in the interrupt handler. Access from outside 209the interrupt handler is only permitted after disable_irq(). 210 211The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap 212is set, then access is permitted under spin_lock_irq(&np->lock). 213 214Thus configuration functions that want to access everything must call 215 disable_irq(dev->irq); 216 netif_tx_lock_bh(dev); 217 spin_lock_irq(&np->lock); 218 219IV. Notes 220 221NatSemi PCI network controllers are very uncommon. 222 223IVb. References 224 225http://www.scyld.com/expert/100mbps.html 226http://www.scyld.com/expert/NWay.html 227Datasheet is available from: 228http://www.national.com/pf/DP/DP83815.html 229 230IVc. Errata 231 232None characterised. 233*/ 234 235 236 237/* 238 * Support for fibre connections on Am79C874: 239 * This phy needs a special setup when connected to a fibre cable. 
 * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
 */
#define PHYID_AM79C874	0x0022561b

/* Am79C874-specific MII registers/bits used for the fibre setup. */
enum {
	MII_MCTRL	= 0x15,		/* mode control register */
	MII_FX_SEL	= 0x0001,	/* 100BASE-FX (fiber) */
	MII_EN_SCRM	= 0x0004,	/* enable scrambler (tp) */
};

/* per-board flag bits stored in natsemi_pci_info[].flags */
enum {
	NATSEMI_FLAG_IGNORE_PHY		= 0x1,
};

/* array of board data directly indexed by pci_tbl[x].driver_data */
static const struct {
	const char *name;
	unsigned long flags;
	unsigned int eeprom_size;
} natsemi_pci_info[] __devinitdata = {
	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
	{ "NatSemi DP8381[56]", 0, 24 },
};

/* First entry matches the Aculab subsystem IDs; the catch-all entry
 * matches every other DP8381x board. */
static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.
*/
enum register_offsets {
	ChipCmd			= 0x00,
	ChipConfig		= 0x04,
	EECtrl			= 0x08,
	PCIBusCfg		= 0x0C,
	IntrStatus		= 0x10,
	IntrMask		= 0x14,
	IntrEnable		= 0x18,
	IntrHoldoff		= 0x1C, /* DP83816 only */
	TxRingPtr		= 0x20,
	TxConfig		= 0x24,
	RxRingPtr		= 0x30,
	RxConfig		= 0x34,
	ClkRun			= 0x3C,
	WOLCmd			= 0x40,
	PauseCmd		= 0x44,
	RxFilterAddr		= 0x48,
	RxFilterData		= 0x4C,
	BootRomAddr		= 0x50,
	BootRomData		= 0x54,
	SiliconRev		= 0x58,
	StatsCtrl		= 0x5C,
	StatsData		= 0x60,
	RxPktErrs		= 0x60,
	RxMissed		= 0x68,
	RxCRCErrs		= 0x64,
	BasicControl		= 0x80,
	BasicStatus		= 0x84,
	AnegAdv			= 0x90,
	AnegPeer		= 0x94,
	PhyStatus		= 0xC0,
	MIntrCtrl		= 0xC4,
	MIntrStatus		= 0xC8,
	PhyCtrl			= 0xE4,

	/* These are from the spec, around page 78... on a separate table.
	 * The meaning of these registers depend on the value of PGSEL. */
	PGSEL			= 0xCC,
	PMDCSR			= 0xE4,
	TSTDAT			= 0xFC,
	DSPCFG			= 0xF4,
	SDCFG			= 0xF8
};
/* the values for the 'magic' registers above (PGSEL=1) */
#define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
#define TSTDAT_VAL	0x0
#define DSPCFG_VAL	0x5040
#define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
#define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
#define DSPCFG_COEF	0x1000	/* see coefficient (in TSTDAT) bit in DSPCFG */
#define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */

/* misc PCI space registers */
enum pci_register_offsets {
	PCIPM			= 0x44,	/* non-standard power-management reg */
};

/* Bits in the ChipCmd register. */
enum ChipCmd_bits {
	ChipReset		= 0x100,
	RxReset			= 0x20,
	TxReset			= 0x10,
	RxOff			= 0x08,
	RxOn			= 0x04,
	TxOff			= 0x02,
	TxOn			= 0x01,
};

/* Bits in the ChipConfig register. */
enum ChipConfig_bits {
	CfgPhyDis		= 0x200,
	CfgPhyRst		= 0x400,
	CfgExtPhy		= 0x1000,
	CfgAnegEnable		= 0x2000,
	CfgAneg100		= 0x4000,
	CfgAnegFull		= 0x8000,
	CfgAnegDone		= 0x8000000,
	CfgFullDuplex		= 0x20000000,
	CfgSpeed100		= 0x40000000,
	CfgLink			= 0x80000000,
};

/* Bits in the EECtrl register: serial EEPROM and bit-banged MII lines. */
enum EECtrl_bits {
	EE_ShiftClk		= 0x04,
	EE_DataIn		= 0x01,
	EE_ChipSelect		= 0x08,
	EE_DataOut		= 0x02,
	MII_Data 		= 0x10,
	MII_Write		= 0x20,
	MII_ShiftClk		= 0x40,
};

enum PCIBusCfg_bits {
	EepromReload		= 0x4,
};

/* Bits in the interrupt status/mask registers. */
enum IntrStatus_bits {
	IntrRxDone		= 0x0001,
	IntrRxIntr		= 0x0002,
	IntrRxErr		= 0x0004,
	IntrRxEarly		= 0x0008,
	IntrRxIdle		= 0x0010,
	IntrRxOverrun		= 0x0020,
	IntrTxDone		= 0x0040,
	IntrTxIntr		= 0x0080,
	IntrTxErr		= 0x0100,
	IntrTxIdle		= 0x0200,
	IntrTxUnderrun		= 0x0400,
	StatsMax		= 0x0800,
	SWInt			= 0x1000,
	WOLPkt			= 0x2000,
	LinkChange		= 0x4000,
	IntrHighBits		= 0x8000,
	RxStatusFIFOOver	= 0x10000,
	IntrPCIErr		= 0xf00000,
	RxResetDone		= 0x1000000,
	TxResetDone		= 0x2000000,
	IntrAbnormalSummary	= 0xCD20,
};

/*
 * Default Interrupts:
 * Rx OK, Rx Packet Error, Rx Overrun,
 * Tx OK, Tx Packet Error, Tx Underrun,
 * MIB Service, Phy Interrupt, High Bits,
 * Rx Status FIFO overrun,
 * Received Target Abort, Received Master Abort,
 * Signalled System Error, Received Parity Error
 */
#define DEFAULT_INTR 0x00f1cd65

enum TxConfig_bits {
	TxDrthMask		= 0x3f,
	TxFlthMask		= 0x3f00,
	TxMxdmaMask		= 0x700000,
	TxMxdma_512		= 0x0,
	TxMxdma_4		= 0x100000,
	TxMxdma_8		= 0x200000,
	TxMxdma_16		= 0x300000,
	TxMxdma_32		= 0x400000,
	TxMxdma_64		= 0x500000,
	TxMxdma_128		= 0x600000,
	TxMxdma_256		= 0x700000,
	TxCollRetry		= 0x800000,
	TxAutoPad		= 0x10000000,
	TxMacLoop		= 0x20000000,
	TxHeartIgn		= 0x40000000,
	TxCarrierIgn		= 0x80000000
};

/*
 * Tx Configuration:
 * - 256 byte DMA burst length
 * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
 * - 64 bytes initial drain threshold (i.e. begin actual transmission
 *   when 64 byte are in the fifo)
 * - on tx underruns, increase drain threshold by 64.
 * - at most use a drain threshold of 1472 bytes: The sum of the fill
 *   threshold and the drain threshold must be less than 2016 bytes.
 *
 */
#define TX_FLTH_VAL		((512/32) << 8)
#define TX_DRTH_VAL_START	(64/32)
#define TX_DRTH_VAL_INC		2
#define TX_DRTH_VAL_LIMIT	(1472/32)

enum RxConfig_bits {
	RxDrthMask		= 0x3e,
	RxMxdmaMask		= 0x700000,
	RxMxdma_512		= 0x0,
	RxMxdma_4		= 0x100000,
	RxMxdma_8		= 0x200000,
	RxMxdma_16		= 0x300000,
	RxMxdma_32		= 0x400000,
	RxMxdma_64		= 0x500000,
	RxMxdma_128		= 0x600000,
	RxMxdma_256		= 0x700000,
	RxAcceptLong		= 0x8000000,
	RxAcceptTx		= 0x10000000,
	RxAcceptRunt		= 0x40000000,
	RxAcceptErr		= 0x80000000
};
/* Rx drain threshold: 128 bytes (units of 8 bytes). */
#define RX_DRTH_VAL		(128/8)

enum ClkRun_bits {
	PMEEnable		= 0x100,
	PMEStatus		= 0x8000,
};

/* Wake-on-LAN control/status: Wake* arm a wake source, Woke* report
 * which source fired. */
enum WolCmd_bits {
	WakePhy			= 0x1,
	WakeUnicast		= 0x2,
	WakeMulticast		= 0x4,
	WakeBroadcast		= 0x8,
	WakeArp			= 0x10,
	WakePMatch0		= 0x20,
	WakePMatch1		= 0x40,
	WakePMatch2		= 0x80,
	WakePMatch3		= 0x100,
	WakeMagic		= 0x200,
	WakeMagicSecure		= 0x400,
	SecureHack		= 0x100000,
	WokePhy			= 0x400000,
	WokeUnicast		= 0x800000,
	WokeMulticast		= 0x1000000,
	WokeBroadcast		= 0x2000000,
	WokeArp			= 0x4000000,
	WokePMatch0		= 0x8000000,
	WokePMatch1		= 0x10000000,
	WokePMatch2		= 0x20000000,
	WokePMatch3		= 0x40000000,
	WokeMagic		= 0x80000000,
	WakeOptsSummary		= 0x7ff
};

enum RxFilterAddr_bits {
	RFCRAddressMask		= 0x3ff,
	AcceptMulticast		= 0x00200000,
	AcceptMyPhys		= 0x08000000,
	AcceptAllPhys		= 0x10000000,
	AcceptAllMulticast	= 0x20000000,
	AcceptBroadcast		= 0x40000000,
	RxFilterEnable		= 0x80000000
};

enum StatsCtrl_bits {
	StatsWarn		= 0x1,
	StatsFreeze		= 0x2,
	StatsClear		= 0x4,
	StatsStrobe		= 0x8,
};

enum MIntrCtrl_bits {
	MICRIntEn		= 0x2,
};

enum PhyCtrl_bits {
	PhyAddrMask		= 0x1f,
};

/* Sentinel phy addresses: 32 is outside the 5-bit MII address space. */
#define PHY_ADDR_NONE		32
#define PHY_ADDR_INTERNAL	1

/* values we might find in the silicon revision register */
#define SRR_DP83815_C		0x0302
#define SRR_DP83815_D		0x0403
#define SRR_DP83816_A4		0x0504
#define SRR_DP83816_A5		0x0505

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;		/* bus address of the next descriptor */
	s32 cmd_status;		/* command / completion status, see below */
	u32 addr;		/* bus address of the data buffer */
	u32 software_use;	/* not touched by the hardware */
};

/* Bits in network_desc.status */
enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};

/* Per-device private state, reached via netdev_priv(dev). */
struct netdev_private {
	/* Descriptor rings first for alignment */
	dma_addr_t ring_dma;
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	/* The addresses of receive-in-place skbuffs */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];
	/* address of a sent-in-place packet/buffer, for later free() */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device_stats stats;
	/* Media monitoring timer */
	struct timer_list timer;
	/* Frequently used values: keep some adjacent for cache effect */
	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;
	/* Producer/consumer ring indices */
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	/* Based on MTU+slack. */
	unsigned int rx_buf_sz;
	int oom;
	/* Interrupt status */
	u32 intr_status;
	/* Do not touch the nic registers */
	int hands_off;
	/* Don't pay attention to the reported link state. */
	int ignore_phy;
	/* external phy that is used: only valid if dev->if_port != PORT_TP */
	int mii;
	int phy_addr_external;
	unsigned int full_duplex;
	/* Rx filter */
	u32 cur_rx_mode;
	u32 rx_filter[16];
	/* FIFO and PCI burst thresholds */
	u32 tx_config, rx_config;
	/* original contents of ClkRun register */
	u32 SavedClkRun;
	/* silicon revision */
	u32 srr;
	/* expected DSPCFG value */
	u16 dspcfg;
	int dspcfg_workaround;
	/* parms saved in ethtool format */
	u16 speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
	u8 duplex;		/* Duplex, half or full */
	u8 autoneg;		/* Autonegotiation enabled */
	/* MII transceiver section */
	u16 advertising;
	unsigned int iosize;
	spinlock_t lock;
	u32 msg_enable;
	/* EEPROM data */
	int eeprom_size;
};

/* Forward declarations for the driver's internal helpers. */
static void move_int_phy(struct net_device *dev, int addr);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int reg);
static void mdio_write(struct net_device *dev, int reg, u16 data);
static void init_phy_fixup(struct net_device *dev);
static int miiport_read(struct net_device *dev, int phy_id, int reg);
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
static int find_mii(struct net_device *dev);
static void natsemi_reset(struct net_device *dev);
static void natsemi_reload_eeprom(struct net_device *dev);
static void natsemi_stop_rxtx(struct net_device *dev);
static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void dump_ring(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
static void drain_tx(struct net_device *dev);
static void drain_ring(struct net_device *dev);
static void free_ring(struct net_device *dev);
static void reinit_ring(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int natsemi_poll(struct net_device *dev, int *budget);
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
static void netdev_tx_done(struct net_device *dev);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev);
#endif
static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __get_stats(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_set_wol(struct net_device *dev, u32 newval);
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
static int netdev_get_sopass(struct net_device *dev, u8 *data);
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
static
void enable_wol_mode(struct net_device *dev, int enable_intr); 658static int netdev_close(struct net_device *dev); 659static int netdev_get_regs(struct net_device *dev, u8 *buf); 660static int netdev_get_eeprom(struct net_device *dev, u8 *buf); 661static const struct ethtool_ops ethtool_ops; 662 663#define NATSEMI_ATTR(_name) \ 664static ssize_t natsemi_show_##_name(struct device *dev, \ 665 struct device_attribute *attr, char *buf); \ 666 static ssize_t natsemi_set_##_name(struct device *dev, \ 667 struct device_attribute *attr, \ 668 const char *buf, size_t count); \ 669 static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name) 670 671#define NATSEMI_CREATE_FILE(_dev, _name) \ 672 device_create_file(&_dev->dev, &dev_attr_##_name) 673#define NATSEMI_REMOVE_FILE(_dev, _name) \ 674 device_create_file(&_dev->dev, &dev_attr_##_name) 675 676NATSEMI_ATTR(dspcfg_workaround); 677 678static ssize_t natsemi_show_dspcfg_workaround(struct device *dev, 679 struct device_attribute *attr, 680 char *buf) 681{ 682 struct netdev_private *np = netdev_priv(to_net_dev(dev)); 683 684 return sprintf(buf, "%s\n", np->dspcfg_workaround ? 
"on" : "off"); 685} 686 687static ssize_t natsemi_set_dspcfg_workaround(struct device *dev, 688 struct device_attribute *attr, 689 const char *buf, size_t count) 690{ 691 struct netdev_private *np = netdev_priv(to_net_dev(dev)); 692 int new_setting; 693 unsigned long flags; 694 695 /* Find out the new setting */ 696 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) 697 new_setting = 1; 698 else if (!strncmp("off", buf, count - 1) 699 || !strncmp("0", buf, count - 1)) 700 new_setting = 0; 701 else 702 return count; 703 704 spin_lock_irqsave(&np->lock, flags); 705 706 np->dspcfg_workaround = new_setting; 707 708 spin_unlock_irqrestore(&np->lock, flags); 709 710 return count; 711} 712 713static inline void __iomem *ns_ioaddr(struct net_device *dev) 714{ 715 return (void __iomem *) dev->base_addr; 716} 717 718static inline void natsemi_irq_enable(struct net_device *dev) 719{ 720 writel(1, ns_ioaddr(dev) + IntrEnable); 721 readl(ns_ioaddr(dev) + IntrEnable); 722} 723 724static inline void natsemi_irq_disable(struct net_device *dev) 725{ 726 writel(0, ns_ioaddr(dev) + IntrEnable); 727 readl(ns_ioaddr(dev) + IntrEnable); 728} 729 730static void move_int_phy(struct net_device *dev, int addr) 731{ 732 struct netdev_private *np = netdev_priv(dev); 733 void __iomem *ioaddr = ns_ioaddr(dev); 734 int target = 31; 735 736 /* 737 * The internal phy is visible on the external mii bus. Therefore we must 738 * move it away before we can send commands to an external phy. 739 * There are two addresses we must avoid: 740 * - the address on the external phy that is used for transmission. 741 * - the address that we want to access. User space can access phys 742 * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independant from the 743 * phy that is used for transmission. 
744 */ 745 746 if (target == addr) 747 target--; 748 if (target == np->phy_addr_external) 749 target--; 750 writew(target, ioaddr + PhyCtrl); 751 readw(ioaddr + PhyCtrl); 752 udelay(1); 753} 754 755static void __devinit natsemi_init_media (struct net_device *dev) 756{ 757 struct netdev_private *np = netdev_priv(dev); 758 u32 tmp; 759 760 if (np->ignore_phy) 761 netif_carrier_on(dev); 762 else 763 netif_carrier_off(dev); 764 765 /* get the initial settings from hardware */ 766 tmp = mdio_read(dev, MII_BMCR); 767 np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10; 768 np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF; 769 np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE; 770 np->advertising= mdio_read(dev, MII_ADVERTISE); 771 772 if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL 773 && netif_msg_probe(np)) { 774 printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s " 775 "10%s %s duplex.\n", 776 pci_name(np->pci_dev), 777 (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)? 778 "enabled, advertise" : "disabled, force", 779 (np->advertising & 780 (ADVERTISE_100FULL|ADVERTISE_100HALF))? 781 "0" : "", 782 (np->advertising & 783 (ADVERTISE_100FULL|ADVERTISE_10FULL))? 
784 "full" : "half"); 785 } 786 if (netif_msg_probe(np)) 787 printk(KERN_INFO 788 "natsemi %s: Transceiver status %#04x advertising %#04x.\n", 789 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR), 790 np->advertising); 791 792} 793 794static int __devinit natsemi_probe1 (struct pci_dev *pdev, 795 const struct pci_device_id *ent) 796{ 797 struct net_device *dev; 798 struct netdev_private *np; 799 int i, option, irq, chip_idx = ent->driver_data; 800 static int find_cnt = -1; 801 unsigned long iostart, iosize; 802 void __iomem *ioaddr; 803 const int pcibar = 1; /* PCI base address register */ 804 int prev_eedata; 805 u32 tmp; 806 807/* when built into the kernel, we only print version if device is found */ 808#ifndef MODULE 809 static int printed_version; 810 if (!printed_version++) 811 printk(version); 812#endif 813 814 i = pci_enable_device(pdev); 815 if (i) return i; 816 817 /* natsemi has a non-standard PM control register 818 * in PCI config space. Some boards apparently need 819 * to be brought to D0 in this manner. 
820 */ 821 pci_read_config_dword(pdev, PCIPM, &tmp); 822 if (tmp & PCI_PM_CTRL_STATE_MASK) { 823 /* D0 state, disable PME assertion */ 824 u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK; 825 pci_write_config_dword(pdev, PCIPM, newtmp); 826 } 827 828 find_cnt++; 829 iostart = pci_resource_start(pdev, pcibar); 830 iosize = pci_resource_len(pdev, pcibar); 831 irq = pdev->irq; 832 833 pci_set_master(pdev); 834 835 dev = alloc_etherdev(sizeof (struct netdev_private)); 836 if (!dev) 837 return -ENOMEM; 838 SET_MODULE_OWNER(dev); 839 SET_NETDEV_DEV(dev, &pdev->dev); 840 841 i = pci_request_regions(pdev, DRV_NAME); 842 if (i) 843 goto err_pci_request_regions; 844 845 ioaddr = ioremap(iostart, iosize); 846 if (!ioaddr) { 847 i = -ENOMEM; 848 goto err_ioremap; 849 } 850 851 prev_eedata = eeprom_read(ioaddr, 6); 852 for (i = 0; i < 3; i++) { 853 int eedata = eeprom_read(ioaddr, i + 7); 854 dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15); 855 dev->dev_addr[i*2+1] = eedata >> 7; 856 prev_eedata = eedata; 857 } 858 859 dev->base_addr = (unsigned long __force) ioaddr; 860 dev->irq = irq; 861 862 np = netdev_priv(dev); 863 864 np->pci_dev = pdev; 865 pci_set_drvdata(pdev, dev); 866 np->iosize = iosize; 867 spin_lock_init(&np->lock); 868 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG; 869 np->hands_off = 0; 870 np->intr_status = 0; 871 np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size; 872 if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY) 873 np->ignore_phy = 1; 874 else 875 np->ignore_phy = 0; 876 np->dspcfg_workaround = dspcfg_workaround; 877 878 /* Initial port: 879 * - If configured to ignore the PHY set up for external. 880 * - If the nic was configured to use an external phy and if find_mii 881 * finds a phy: use external port, first phy that replies. 882 * - Otherwise: internal port. 
883 * Note that the phy address for the internal phy doesn't matter: 884 * The address would be used to access a phy over the mii bus, but 885 * the internal phy is accessed through mapped registers. 886 */ 887 if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy) 888 dev->if_port = PORT_MII; 889 else 890 dev->if_port = PORT_TP; 891 /* Reset the chip to erase previous misconfiguration. */ 892 natsemi_reload_eeprom(dev); 893 natsemi_reset(dev); 894 895 if (dev->if_port != PORT_TP) { 896 np->phy_addr_external = find_mii(dev); 897 /* If we're ignoring the PHY it doesn't matter if we can't 898 * find one. */ 899 if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) { 900 dev->if_port = PORT_TP; 901 np->phy_addr_external = PHY_ADDR_INTERNAL; 902 } 903 } else { 904 np->phy_addr_external = PHY_ADDR_INTERNAL; 905 } 906 907 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; 908 if (dev->mem_start) 909 option = dev->mem_start; 910 911 /* The lower four bits are the media type. */ 912 if (option) { 913 if (option & 0x200) 914 np->full_duplex = 1; 915 if (option & 15) 916 printk(KERN_INFO 917 "natsemi %s: ignoring user supplied media type %d", 918 pci_name(np->pci_dev), option & 15); 919 } 920 if (find_cnt < MAX_UNITS && full_duplex[find_cnt]) 921 np->full_duplex = 1; 922 923 /* The chip-specific entries in the device structure. 
*/ 924 dev->open = &netdev_open; 925 dev->hard_start_xmit = &start_tx; 926 dev->stop = &netdev_close; 927 dev->get_stats = &get_stats; 928 dev->set_multicast_list = &set_rx_mode; 929 dev->change_mtu = &natsemi_change_mtu; 930 dev->do_ioctl = &netdev_ioctl; 931 dev->tx_timeout = &tx_timeout; 932 dev->watchdog_timeo = TX_TIMEOUT; 933 dev->poll = natsemi_poll; 934 dev->weight = 64; 935 936#ifdef CONFIG_NET_POLL_CONTROLLER 937 dev->poll_controller = &natsemi_poll_controller; 938#endif 939 SET_ETHTOOL_OPS(dev, ðtool_ops); 940 941 if (mtu) 942 dev->mtu = mtu; 943 944 natsemi_init_media(dev); 945 946 /* save the silicon revision for later querying */ 947 np->srr = readl(ioaddr + SiliconRev); 948 if (netif_msg_hw(np)) 949 printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n", 950 pci_name(np->pci_dev), np->srr); 951 952 i = register_netdev(dev); 953 if (i) 954 goto err_register_netdev; 955 956 if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround)) 957 goto err_create_file; 958 959 if (netif_msg_drv(np)) { 960 printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ", 961 dev->name, natsemi_pci_info[chip_idx].name, iostart, 962 pci_name(np->pci_dev)); 963 for (i = 0; i < ETH_ALEN-1; i++) 964 printk("%02x:", dev->dev_addr[i]); 965 printk("%02x, IRQ %d", dev->dev_addr[i], irq); 966 if (dev->if_port == PORT_TP) 967 printk(", port TP.\n"); 968 else if (np->ignore_phy) 969 printk(", port MII, ignoring PHY\n"); 970 else 971 printk(", port MII, phy ad %d.\n", np->phy_addr_external); 972 } 973 return 0; 974 975 err_create_file: 976 unregister_netdev(dev); 977 978 err_register_netdev: 979 iounmap(ioaddr); 980 981 err_ioremap: 982 pci_release_regions(pdev); 983 pci_set_drvdata(pdev, NULL); 984 985 err_pci_request_regions: 986 free_netdev(dev); 987 return i; 988} 989 990 991/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. 992 The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */ 993 994/* Delay between EEPROM clock transitions. 
   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
   a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

/* Bit-bang one 16-bit word out of the 93c06/46 EEPROM at @location.
 * @addr is the mapped register base; the serial EEPROM interface lives
 * in the EECtrl register.  Clocking: data is set up, then EE_ShiftClk is
 * raised; eeprom_delay() (a posted-write flush via readl) paces each edge.
 */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	writel(EE_Write0, ee_addr);

	/* Shift the read command bits out, MSB first (3 cmd + 8 addr bits). */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock the 16 data bits in, LSB first into retval. */
	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access: drop chip select. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}

/* MII transceiver control section.
 * The 83815 series has an internal transceiver, and we present the
 * internal management registers as if they were MII connected.
 * External Phy registers are referenced through the MII interface.
 */

/* clock transitions >= 20ns (25MHz)
 * One readl should be good to PCI @ 100MHz
 */
#define mii_delay(ioaddr)  readl(ioaddr + EECtrl)

/* Clock one bit in from the MDIO line (read phase of an MII frame).
 * Returns 0 or 1.
 */
static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data)? 1 : 0;
}

/* Clock the top @len bits of @data out on the MDIO line, MSB first. */
static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1)
	{
		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}

/* Read MII register @reg from external PHY @phy_id by bit-banging a
 * standard management frame.  Returns the 16-bit register value, or 0
 * if the PHY does not drive the turnaround bit low (no response).
 */
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Ensure sync: 32 consecutive 1-bits as preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	/* Turnaround: the PHY must pull the line low here */
	if (mii_getbit (dev))
		return 0;
	/* Read data, MSB first */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}
	/* End cycle */
	mii_getbit (dev);
	return retval;
}

/* Write 16-bit @data to MII register @reg of external PHY @phy_id. */
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Ensure sync: 32 consecutive 1-bits as preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
}

/* Read an MII register, dispatching on the active port. */
static int mdio_read(struct net_device *dev, int reg)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* The 83815 series has two ports:
	 * - an internal transceiver, whose management registers are
	 *   memory-mapped starting at BasicControl (4 bytes per register)
	 * - an external mii bus, reached by bit-banging (miiport_read)
	 */
	if (dev->if_port == PORT_TP)
		return readw(ioaddr+BasicControl+(reg<<2));
	else
		return miiport_read(dev, np->phy_addr_external, reg);
}

/* Write an MII register, dispatching on the active port. */
static void mdio_write(struct net_device *dev, int reg, u16 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* The 83815 series has an internal transceiver; handle separately */
	if (dev->if_port == PORT_TP)
		writew(data, ioaddr+BasicControl+(reg<<2));
	else
		miiport_write(dev, np->phy_addr_external, reg, data);
}

/* Restore PHY configuration that is lost across power-down/reset:
 * autoneg/speed/duplex in BMCR, PHY-model-specific fibre/TP setup,
 * the NSC-recommended DSP tuning values (internal PHY only), and the
 * MII event interrupt enables.
 */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0
		 || np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);	/* flush posted write */
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;		/* DSP tuning below applies to the internal PHY only */

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them.  If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	*/
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		writew(1, ioaddr + PGSEL);	/* select the tuning register page */
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		/* read back; retry until the DSPCFG value sticks */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}

/* Switch the chip to the external transceiver.  Returns 1 if a switch
 * was performed, 0 if the external PHY was already selected.
 */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power.  We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}

/* Switch the chip to the internal transceiver.  Returns 1 if a switch
 * was performed, 0 if the internal PHY was already selected.
 */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the internal phy: */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* wait for the self-clearing reset bit to drop */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}

/* Scan for a PHY on the external mii bus.
 * There are two tricky points:
 * - Do not scan while the internal phy is enabled. The internal phy will
 *   crash: e.g. reads from the DSPCFG register will return odd values and
 *   the nasty random phy reset code will reset the nic every few seconds.
 * - The internal phy must be moved around, an external phy could
 *   have the same address as the internal phy.
1348 */ 1349static int find_mii(struct net_device *dev) 1350{ 1351 struct netdev_private *np = netdev_priv(dev); 1352 int tmp; 1353 int i; 1354 int did_switch; 1355 1356 /* Switch to external phy */ 1357 did_switch = switch_port_external(dev); 1358 1359 /* Scan the possible phy addresses: 1360 * 1361 * PHY address 0 means that the phy is in isolate mode. Not yet 1362 * supported due to lack of test hardware. User space should 1363 * handle it through ethtool. 1364 */ 1365 for (i = 1; i <= 31; i++) { 1366 move_int_phy(dev, i); 1367 tmp = miiport_read(dev, i, MII_BMSR); 1368 if (tmp != 0xffff && tmp != 0x0000) { 1369 /* found something! */ 1370 np->mii = (mdio_read(dev, MII_PHYSID1) << 16) 1371 + mdio_read(dev, MII_PHYSID2); 1372 if (netif_msg_probe(np)) { 1373 printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n", 1374 pci_name(np->pci_dev), np->mii, i); 1375 } 1376 break; 1377 } 1378 } 1379 /* And switch back to internal phy: */ 1380 if (did_switch) 1381 switch_port_internal(dev); 1382 return i; 1383} 1384 1385/* CFG bits [13:16] [18:23] */ 1386#define CFG_RESET_SAVE 0xfde000 1387/* WCSR bits [0:4] [9:10] */ 1388#define WCSR_RESET_SAVE 0x61f 1389/* RFCR bits [20] [22] [27:31] */ 1390#define RFCR_RESET_SAVE 0xf8500000; 1391 1392static void natsemi_reset(struct net_device *dev) 1393{ 1394 int i; 1395 u32 cfg; 1396 u32 wcsr; 1397 u32 rfcr; 1398 u16 pmatch[3]; 1399 u16 sopass[3]; 1400 struct netdev_private *np = netdev_priv(dev); 1401 void __iomem *ioaddr = ns_ioaddr(dev); 1402 1403 /* 1404 * Resetting the chip causes some registers to be lost. 1405 * Natsemi suggests NOT reloading the EEPROM while live, so instead 1406 * we save the state that would have been loaded from EEPROM 1407 * on a normal power-up (see the spec EEPROM map). This assumes 1408 * whoever calls this will follow up with init_registers() eventually. 
1409 */ 1410 1411 /* CFG */ 1412 cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE; 1413 /* WCSR */ 1414 wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE; 1415 /* RFCR */ 1416 rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE; 1417 /* PMATCH */ 1418 for (i = 0; i < 3; i++) { 1419 writel(i*2, ioaddr + RxFilterAddr); 1420 pmatch[i] = readw(ioaddr + RxFilterData); 1421 } 1422 /* SOPAS */ 1423 for (i = 0; i < 3; i++) { 1424 writel(0xa+(i*2), ioaddr + RxFilterAddr); 1425 sopass[i] = readw(ioaddr + RxFilterData); 1426 } 1427 1428 /* now whack the chip */ 1429 writel(ChipReset, ioaddr + ChipCmd); 1430 for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { 1431 if (!(readl(ioaddr + ChipCmd) & ChipReset)) 1432 break; 1433 udelay(5); 1434 } 1435 if (i==NATSEMI_HW_TIMEOUT) { 1436 printk(KERN_WARNING "%s: reset did not complete in %d usec.\n", 1437 dev->name, i*5); 1438 } else if (netif_msg_hw(np)) { 1439 printk(KERN_DEBUG "%s: reset completed in %d usec.\n", 1440 dev->name, i*5); 1441 } 1442 1443 /* restore CFG */ 1444 cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE; 1445 /* turn on external phy if it was selected */ 1446 if (dev->if_port == PORT_TP) 1447 cfg &= ~(CfgExtPhy | CfgPhyDis); 1448 else 1449 cfg |= (CfgExtPhy | CfgPhyDis); 1450 writel(cfg, ioaddr + ChipConfig); 1451 /* restore WCSR */ 1452 wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE; 1453 writel(wcsr, ioaddr + WOLCmd); 1454 /* read RFCR */ 1455 rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE; 1456 /* restore PMATCH */ 1457 for (i = 0; i < 3; i++) { 1458 writel(i*2, ioaddr + RxFilterAddr); 1459 writew(pmatch[i], ioaddr + RxFilterData); 1460 } 1461 for (i = 0; i < 3; i++) { 1462 writel(0xa+(i*2), ioaddr + RxFilterAddr); 1463 writew(sopass[i], ioaddr + RxFilterData); 1464 } 1465 /* restore RFCR */ 1466 writel(rfcr, ioaddr + RxFilterAddr); 1467} 1468 1469static void reset_rx(struct net_device *dev) 1470{ 1471 int i; 1472 struct netdev_private *np = netdev_priv(dev); 1473 void __iomem *ioaddr = ns_ioaddr(dev); 1474 
1475 np->intr_status &= ~RxResetDone; 1476 1477 writel(RxReset, ioaddr + ChipCmd); 1478 1479 for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { 1480 np->intr_status |= readl(ioaddr + IntrStatus); 1481 if (np->intr_status & RxResetDone) 1482 break; 1483 udelay(15); 1484 } 1485 if (i==NATSEMI_HW_TIMEOUT) { 1486 printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n", 1487 dev->name, i*15); 1488 } else if (netif_msg_hw(np)) { 1489 printk(KERN_WARNING "%s: RX reset took %d usec.\n", 1490 dev->name, i*15); 1491 } 1492} 1493 1494static void natsemi_reload_eeprom(struct net_device *dev) 1495{ 1496 struct netdev_private *np = netdev_priv(dev); 1497 void __iomem *ioaddr = ns_ioaddr(dev); 1498 int i; 1499 1500 writel(EepromReload, ioaddr + PCIBusCfg); 1501 for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { 1502 udelay(50); 1503 if (!(readl(ioaddr + PCIBusCfg) & EepromReload)) 1504 break; 1505 } 1506 if (i==NATSEMI_HW_TIMEOUT) { 1507 printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n", 1508 pci_name(np->pci_dev), i*50); 1509 } else if (netif_msg_hw(np)) { 1510 printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n", 1511 pci_name(np->pci_dev), i*50); 1512 } 1513} 1514 1515static void natsemi_stop_rxtx(struct net_device *dev) 1516{ 1517 void __iomem * ioaddr = ns_ioaddr(dev); 1518 struct netdev_private *np = netdev_priv(dev); 1519 int i; 1520 1521 writel(RxOff | TxOff, ioaddr + ChipCmd); 1522 for(i=0;i< NATSEMI_HW_TIMEOUT;i++) { 1523 if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0) 1524 break; 1525 udelay(5); 1526 } 1527 if (i==NATSEMI_HW_TIMEOUT) { 1528 printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n", 1529 dev->name, i*5); 1530 } else if (netif_msg_hw(np)) { 1531 printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n", 1532 dev->name, i*5); 1533 } 1534} 1535 1536static int netdev_open(struct net_device *dev) 1537{ 1538 struct netdev_private *np = netdev_priv(dev); 1539 void __iomem * ioaddr = ns_ioaddr(dev); 1540 int i; 1541 1542 /* Reset the 
chip, just in case. */ 1543 natsemi_reset(dev); 1544 1545 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev); 1546 if (i) return i; 1547 1548 if (netif_msg_ifup(np)) 1549 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 1550 dev->name, dev->irq); 1551 i = alloc_ring(dev); 1552 if (i < 0) { 1553 free_irq(dev->irq, dev); 1554 return i; 1555 } 1556 init_ring(dev); 1557 spin_lock_irq(&np->lock); 1558 init_registers(dev); 1559 /* now set the MAC address according to dev->dev_addr */ 1560 for (i = 0; i < 3; i++) { 1561 u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i]; 1562 1563 writel(i*2, ioaddr + RxFilterAddr); 1564 writew(mac, ioaddr + RxFilterData); 1565 } 1566 writel(np->cur_rx_mode, ioaddr + RxFilterAddr); 1567 spin_unlock_irq(&np->lock); 1568 1569 netif_start_queue(dev); 1570 1571 if (netif_msg_ifup(np)) 1572 printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n", 1573 dev->name, (int)readl(ioaddr + ChipCmd)); 1574 1575 /* Set the timer to check for link beat. */ 1576 init_timer(&np->timer); 1577 np->timer.expires = jiffies + NATSEMI_TIMER_FREQ; 1578 np->timer.data = (unsigned long)dev; 1579 np->timer.function = &netdev_timer; /* timer handler */ 1580 add_timer(&np->timer); 1581 1582 return 0; 1583} 1584 1585static void do_cable_magic(struct net_device *dev) 1586{ 1587 struct netdev_private *np = netdev_priv(dev); 1588 void __iomem *ioaddr = ns_ioaddr(dev); 1589 1590 if (dev->if_port != PORT_TP) 1591 return; 1592 1593 if (np->srr >= SRR_DP83816_A5) 1594 return; 1595 1596 /* 1597 * 100 MBit links with short cables can trip an issue with the chip. 1598 * The problem manifests as lots of CRC errors and/or flickering 1599 * activity LED while idle. This process is based on instructions 1600 * from engineers at National. 
1601 */ 1602 if (readl(ioaddr + ChipConfig) & CfgSpeed100) { 1603 u16 data; 1604 1605 writew(1, ioaddr + PGSEL); 1606 /* 1607 * coefficient visibility should already be enabled via 1608 * DSPCFG | 0x1000 1609 */ 1610 data = readw(ioaddr + TSTDAT) & 0xff; 1611 /* 1612 * the value must be negative, and within certain values 1613 * (these values all come from National) 1614 */ 1615 if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) { 1616 struct netdev_private *np = netdev_priv(dev); 1617 1618 /* the bug has been triggered - fix the coefficient */ 1619 writew(TSTDAT_FIXED, ioaddr + TSTDAT); 1620 /* lock the value */ 1621 data = readw(ioaddr + DSPCFG); 1622 np->dspcfg = data | DSPCFG_LOCK; 1623 writew(np->dspcfg, ioaddr + DSPCFG); 1624 } 1625 writew(0, ioaddr + PGSEL); 1626 } 1627} 1628 1629static void undo_cable_magic(struct net_device *dev) 1630{ 1631 u16 data; 1632 struct netdev_private *np = netdev_priv(dev); 1633 void __iomem * ioaddr = ns_ioaddr(dev); 1634 1635 if (dev->if_port != PORT_TP) 1636 return; 1637 1638 if (np->srr >= SRR_DP83816_A5) 1639 return; 1640 1641 writew(1, ioaddr + PGSEL); 1642 /* make sure the lock bit is clear */ 1643 data = readw(ioaddr + DSPCFG); 1644 np->dspcfg = data & ~DSPCFG_LOCK; 1645 writew(np->dspcfg, ioaddr + DSPCFG); 1646 writew(0, ioaddr + PGSEL); 1647} 1648 1649static void check_link(struct net_device *dev) 1650{ 1651 struct netdev_private *np = netdev_priv(dev); 1652 void __iomem * ioaddr = ns_ioaddr(dev); 1653 int duplex = np->duplex; 1654 u16 bmsr; 1655 1656 /* If we are ignoring the PHY then don't try reading it. */ 1657 if (np->ignore_phy) 1658 goto propagate_state; 1659 1660 /* The link status field is latched: it remains low after a temporary 1661 * link failure until it's read. We need the current link status, 1662 * thus read twice. 
1663 */ 1664 mdio_read(dev, MII_BMSR); 1665 bmsr = mdio_read(dev, MII_BMSR); 1666 1667 if (!(bmsr & BMSR_LSTATUS)) { 1668 if (netif_carrier_ok(dev)) { 1669 if (netif_msg_link(np)) 1670 printk(KERN_NOTICE "%s: link down.\n", 1671 dev->name); 1672 netif_carrier_off(dev); 1673 undo_cable_magic(dev); 1674 } 1675 return; 1676 } 1677 if (!netif_carrier_ok(dev)) { 1678 if (netif_msg_link(np)) 1679 printk(KERN_NOTICE "%s: link up.\n", dev->name); 1680 netif_carrier_on(dev); 1681 do_cable_magic(dev); 1682 } 1683 1684 duplex = np->full_duplex; 1685 if (!duplex) { 1686 if (bmsr & BMSR_ANEGCOMPLETE) { 1687 int tmp = mii_nway_result( 1688 np->advertising & mdio_read(dev, MII_LPA)); 1689 if (tmp == LPA_100FULL || tmp == LPA_10FULL) 1690 duplex = 1; 1691 } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX) 1692 duplex = 1; 1693 } 1694 1695propagate_state: 1696 /* if duplex is set then bit 28 must be set, too */ 1697 if (duplex ^ !!(np->rx_config & RxAcceptTx)) { 1698 if (netif_msg_link(np)) 1699 printk(KERN_INFO 1700 "%s: Setting %s-duplex based on negotiated " 1701 "link capability.\n", dev->name, 1702 duplex ? "full" : "half"); 1703 if (duplex) { 1704 np->rx_config |= RxAcceptTx; 1705 np->tx_config |= TxCarrierIgn | TxHeartIgn; 1706 } else { 1707 np->rx_config &= ~RxAcceptTx; 1708 np->tx_config &= ~(TxCarrierIgn | TxHeartIgn); 1709 } 1710 writel(np->tx_config, ioaddr + TxConfig); 1711 writel(np->rx_config, ioaddr + RxConfig); 1712 } 1713} 1714 1715static void init_registers(struct net_device *dev) 1716{ 1717 struct netdev_private *np = netdev_priv(dev); 1718 void __iomem * ioaddr = ns_ioaddr(dev); 1719 1720 init_phy_fixup(dev); 1721 1722 /* clear any interrupts that are pending, such as wake events */ 1723 readl(ioaddr + IntrStatus); 1724 1725 writel(np->ring_dma, ioaddr + RxRingPtr); 1726 writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), 1727 ioaddr + TxRingPtr); 1728 1729 /* Initialize other registers. 1730 * Configure the PCI bus bursts and FIFO thresholds. 
1731 * Configure for standard, in-spec Ethernet. 1732 * Start with half-duplex. check_link will update 1733 * to the correct settings. 1734 */ 1735 1736 /* DRTH: 2: start tx if 64 bytes are in the fifo 1737 * FLTH: 0x10: refill with next packet if 512 bytes are free 1738 * MXDMA: 0: up to 256 byte bursts. 1739 * MXDMA must be <= FLTH 1740 * ECRETRY=1 1741 * ATP=1 1742 */ 1743 np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | 1744 TX_FLTH_VAL | TX_DRTH_VAL_START; 1745 writel(np->tx_config, ioaddr + TxConfig); 1746 1747 /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo 1748 * MXDMA 0: up to 256 byte bursts 1749 */ 1750 np->rx_config = RxMxdma_256 | RX_DRTH_VAL; 1751 /* if receive ring now has bigger buffers than normal, enable jumbo */ 1752 if (np->rx_buf_sz > NATSEMI_LONGPKT) 1753 np->rx_config |= RxAcceptLong; 1754 1755 writel(np->rx_config, ioaddr + RxConfig); 1756 1757 /* Disable PME: 1758 * The PME bit is initialized from the EEPROM contents. 1759 * PCI cards probably have PME disabled, but motherboard 1760 * implementations may have PME set to enable WakeOnLan. 1761 * With PME set the chip will scan incoming packets but 1762 * nothing will be written to memory. */ 1763 np->SavedClkRun = readl(ioaddr + ClkRun); 1764 writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun); 1765 if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) { 1766 printk(KERN_NOTICE "%s: Wake-up event %#08x\n", 1767 dev->name, readl(ioaddr + WOLCmd)); 1768 } 1769 1770 check_link(dev); 1771 __set_rx_mode(dev); 1772 1773 /* Enable interrupts by setting the interrupt mask. */ 1774 writel(DEFAULT_INTR, ioaddr + IntrMask); 1775 natsemi_irq_enable(dev); 1776 1777 writel(RxOn | TxOn, ioaddr + ChipCmd); 1778 writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */ 1779} 1780 1781/* 1782 * netdev_timer: 1783 * Purpose: 1784 * 1) check for link changes. Usually they are handled by the MII interrupt 1785 * but it doesn't hurt to check twice. 
 * 2) check for sudden death of the NIC:
 *    It seems that a reference set for this chip went out with incorrect info,
 *    and there exist boards that aren't quite right.  An unexpected voltage
 *    drop can cause the PHY to get itself in a weird state (basically reset).
 *    NOTE: this only seems to affect revC chips.  The user can disable
 *    this check via dspcfg_workaround sysfs option.
 * 3) check of death of the RX path due to OOM
 */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = 5*HZ;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				/* drop the lock before disable_irq(), then
				 * retake it for the full reinit sequence */
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(dev->irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(dev->irq);
			} else {
				/* hurry back */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for the above case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	/* retry a refill that failed earlier for lack of memory */
	if (np->oom) {
		disable_irq(dev->irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(dev->irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;
		}
	}
	mod_timer(&np->timer, jiffies + next_tick);
}

/* Dump both descriptor rings to the kernel log (pktdata debug only). */
static void dump_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_pktdata(np)) {
		int i;
		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->tx_ring[i].next_desc,
				np->tx_ring[i].cmd_status,
				np->tx_ring[i].addr);
		}
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->rx_ring[i].next_desc,
				np->rx_ring[i].cmd_status,
				np->rx_ring[i].addr);
		}
	}
}

/* Tx watchdog: reset the chip and rings unless a suspend/hands_off
 * operation owns the hardware.
 */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);
}

/* Allocate one coherent DMA area holding the Rx ring followed by the
 * Tx ring.  Returns 0 or -ENOMEM.
 */
static int alloc_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	np->rx_ring = pci_alloc_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		&np->ring_dma);
	if (!np->rx_ring)
		return -ENOMEM;
	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
	return 0;
}

/* Allocate and map skbs for all empty Rx descriptors; sets np->oom
 * (and leaves the gap) if an allocation fails.
 */
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = dev_alloc_skb(buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* Better luck next round. */
			skb->dev = dev; /* Mark as being used by this device. */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}

/* Derive np->rx_buf_sz from the current MTU (with header slack). */
static void set_bufsize(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits.
 */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring: chain each descriptor to the next (circular). */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be careful before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	 */
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
				+sizeof(struct netdev_desc)
				*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}

/* Unmap and free every queued Tx skb; counts them as dropped. */
static void drain_tx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
				np->tx_dma[i], np->tx_skbuff[i]->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
			np->stats.tx_dropped++;
		}
		np->tx_skbuff[i] = NULL;
	}
}

/* Unmap and free every Rx buffer and poison the descriptor addresses. */
static void drain_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int buflen = np->rx_buf_sz;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
				np->rx_dma[i], buflen,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
}

/* Release all buffers on both rings. */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}

/* Free the coherent DMA area allocated by alloc_ring(). */
static void free_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	pci_free_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		np->rx_ring, np->ring_dma);
}

/* Re-arm the Rx ring (give all descriptors back to the chip) and
 * refill any missing buffers.
 */
static void reinit_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* RX Ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->rx_head_desc = &np->rx_ring[0];
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);

	refill_rx(dev);
}

/* Drop all pending Tx packets and re-arm both rings. */
static void reinit_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* drain TX ring */
	drain_tx(dev);
	np->dirty_tx = np->cur_tx = 0;
	for (i=0;i<TX_RING_SIZE;i++)
		np->tx_ring[i].cmd_status = 0;

	reinit_rx(dev);
}

/* hard_start_xmit: map the skb, hand the descriptor to the chip and
 * kick the transmitter.  Silently drops the packet if a hands_off
 * operation (e.g. suspend) owns the hardware.
 */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		dev_kfree_skb_irq(skb);
		np->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	dev->trans_start = jiffies;

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* Reclaim completed Tx descriptors: update stats, unmap and free the
 * skbs, and wake the queue once enough ring space is available.
 * Called with np->lock held (callers take it).
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;	/* chip still owns this descriptor */
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			np->stats.tx_packets++;
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				np->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				np->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				np->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				np->stats.tx_window_errors++;
			np->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		/* Free the original skb. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev)
		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}

/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Reading IntrStatus automatically acknowledges so don't do
	 * that while interrupts are disabled, (for example, while a
	 * poll is scheduled).  */
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (netif_rx_schedule_prep(dev)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__netif_rx_schedule(dev);
	} else
		printk(KERN_WARNING
	       	       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}

/* This is the NAPI poll routine.
As well as the standard RX handling 2216 * it also handles all other interrupts that the chip might raise. 2217 */ 2218static int natsemi_poll(struct net_device *dev, int *budget) 2219{ 2220 struct netdev_private *np = netdev_priv(dev); 2221 void __iomem * ioaddr = ns_ioaddr(dev); 2222 2223 int work_to_do = min(*budget, dev->quota); 2224 int work_done = 0; 2225 2226 do { 2227 if (netif_msg_intr(np)) 2228 printk(KERN_DEBUG 2229 "%s: Poll, status %#08x, mask %#08x.\n", 2230 dev->name, np->intr_status, 2231 readl(ioaddr + IntrMask)); 2232 2233 /* netdev_rx() may read IntrStatus again if the RX state 2234 * machine falls over so do it first. */ 2235 if (np->intr_status & 2236 (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | 2237 IntrRxErr | IntrRxOverrun)) { 2238 netdev_rx(dev, &work_done, work_to_do); 2239 } 2240 2241 if (np->intr_status & 2242 (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { 2243 spin_lock(&np->lock); 2244 netdev_tx_done(dev); 2245 spin_unlock(&np->lock); 2246 } 2247 2248 /* Abnormal error summary/uncommon events handlers. */ 2249 if (np->intr_status & IntrAbnormalSummary) 2250 netdev_error(dev, np->intr_status); 2251 2252 *budget -= work_done; 2253 dev->quota -= work_done; 2254 2255 if (work_done >= work_to_do) 2256 return 1; 2257 2258 np->intr_status = readl(ioaddr + IntrStatus); 2259 } while (np->intr_status); 2260 2261 netif_rx_complete(dev); 2262 2263 /* Reenable interrupts providing nothing is trying to shut 2264 * the chip down. */ 2265 spin_lock(&np->lock); 2266 if (!np->hands_off && netif_running(dev)) 2267 natsemi_irq_enable(dev); 2268 spin_unlock(&np->lock); 2269 2270 return 0; 2271} 2272 2273/* This routine is logically part of the interrupt handler, but separated 2274 for clarity and better register allocation. 
*/ 2275static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) 2276{ 2277 struct netdev_private *np = netdev_priv(dev); 2278 int entry = np->cur_rx % RX_RING_SIZE; 2279 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; 2280 s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); 2281 unsigned int buflen = np->rx_buf_sz; 2282 void __iomem * ioaddr = ns_ioaddr(dev); 2283 2284 /* If the driver owns the next entry it's a new packet. Send it up. */ 2285 while (desc_status < 0) { /* e.g. & DescOwn */ 2286 int pkt_len; 2287 if (netif_msg_rx_status(np)) 2288 printk(KERN_DEBUG 2289 " netdev_rx() entry %d status was %#08x.\n", 2290 entry, desc_status); 2291 if (--boguscnt < 0) 2292 break; 2293 2294 if (*work_done >= work_to_do) 2295 break; 2296 2297 (*work_done)++; 2298 2299 pkt_len = (desc_status & DescSizeMask) - 4; 2300 if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ 2301 if (desc_status & DescMore) { 2302 unsigned long flags; 2303 2304 if (netif_msg_rx_err(np)) 2305 printk(KERN_WARNING 2306 "%s: Oversized(?) Ethernet " 2307 "frame spanned multiple " 2308 "buffers, entry %#08x " 2309 "status %#08x.\n", dev->name, 2310 np->cur_rx, desc_status); 2311 np->stats.rx_length_errors++; 2312 2313 /* The RX state machine has probably 2314 * locked up beneath us. Follow the 2315 * reset procedure documented in 2316 * AN-1287. */ 2317 2318 spin_lock_irqsave(&np->lock, flags); 2319 reset_rx(dev); 2320 reinit_rx(dev); 2321 writel(np->ring_dma, ioaddr + RxRingPtr); 2322 check_link(dev); 2323 spin_unlock_irqrestore(&np->lock, flags); 2324 2325 /* We'll enable RX on exit from this 2326 * function. */ 2327 break; 2328 2329 } else { 2330 /* There was an error. 
*/ 2331 np->stats.rx_errors++; 2332 if (desc_status & (DescRxAbort|DescRxOver)) 2333 np->stats.rx_over_errors++; 2334 if (desc_status & (DescRxLong|DescRxRunt)) 2335 np->stats.rx_length_errors++; 2336 if (desc_status & (DescRxInvalid|DescRxAlign)) 2337 np->stats.rx_frame_errors++; 2338 if (desc_status & DescRxCRC) 2339 np->stats.rx_crc_errors++; 2340 } 2341 } else if (pkt_len > np->rx_buf_sz) { 2342 /* if this is the tail of a double buffer 2343 * packet, we've already counted the error 2344 * on the first part. Ignore the second half. 2345 */ 2346 } else { 2347 struct sk_buff *skb; 2348 /* Omit CRC size. */ 2349 /* Check if the packet is long enough to accept 2350 * without copying to a minimally-sized skbuff. */ 2351 if (pkt_len < rx_copybreak 2352 && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) { 2353 /* 16 byte align the IP header */ 2354 skb_reserve(skb, RX_OFFSET); 2355 pci_dma_sync_single_for_cpu(np->pci_dev, 2356 np->rx_dma[entry], 2357 buflen, 2358 PCI_DMA_FROMDEVICE); 2359 eth_copy_and_sum(skb, 2360 np->rx_skbuff[entry]->data, pkt_len, 0); 2361 skb_put(skb, pkt_len); 2362 pci_dma_sync_single_for_device(np->pci_dev, 2363 np->rx_dma[entry], 2364 buflen, 2365 PCI_DMA_FROMDEVICE); 2366 } else { 2367 pci_unmap_single(np->pci_dev, np->rx_dma[entry], 2368 buflen, PCI_DMA_FROMDEVICE); 2369 skb_put(skb = np->rx_skbuff[entry], pkt_len); 2370 np->rx_skbuff[entry] = NULL; 2371 } 2372 skb->protocol = eth_type_trans(skb, dev); 2373 netif_receive_skb(skb); 2374 dev->last_rx = jiffies; 2375 np->stats.rx_packets++; 2376 np->stats.rx_bytes += pkt_len; 2377 } 2378 entry = (++np->cur_rx) % RX_RING_SIZE; 2379 np->rx_head_desc = &np->rx_ring[entry]; 2380 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); 2381 } 2382 refill_rx(dev); 2383 2384 /* Restart Rx engine if stopped. 
*/ 2385 if (np->oom) 2386 mod_timer(&np->timer, jiffies + 1); 2387 else 2388 writel(RxOn, ioaddr + ChipCmd); 2389} 2390 2391static void netdev_error(struct net_device *dev, int intr_status) 2392{ 2393 struct netdev_private *np = netdev_priv(dev); 2394 void __iomem * ioaddr = ns_ioaddr(dev); 2395 2396 spin_lock(&np->lock); 2397 if (intr_status & LinkChange) { 2398 u16 lpa = mdio_read(dev, MII_LPA); 2399 if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE 2400 && netif_msg_link(np)) { 2401 printk(KERN_INFO 2402 "%s: Autonegotiation advertising" 2403 " %#04x partner %#04x.\n", dev->name, 2404 np->advertising, lpa); 2405 } 2406 2407 /* read MII int status to clear the flag */ 2408 readw(ioaddr + MIntrStatus); 2409 check_link(dev); 2410 } 2411 if (intr_status & StatsMax) { 2412 __get_stats(dev); 2413 } 2414 if (intr_status & IntrTxUnderrun) { 2415 if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) { 2416 np->tx_config += TX_DRTH_VAL_INC; 2417 if (netif_msg_tx_err(np)) 2418 printk(KERN_NOTICE 2419 "%s: increased tx threshold, txcfg %#08x.\n", 2420 dev->name, np->tx_config); 2421 } else { 2422 if (netif_msg_tx_err(np)) 2423 printk(KERN_NOTICE 2424 "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n", 2425 dev->name, np->tx_config); 2426 } 2427 writel(np->tx_config, ioaddr + TxConfig); 2428 } 2429 if (intr_status & WOLPkt && netif_msg_wol(np)) { 2430 int wol_status = readl(ioaddr + WOLCmd); 2431 printk(KERN_NOTICE "%s: Link wake-up event %#08x\n", 2432 dev->name, wol_status); 2433 } 2434 if (intr_status & RxStatusFIFOOver) { 2435 if (netif_msg_rx_err(np) && netif_msg_intr(np)) { 2436 printk(KERN_NOTICE "%s: Rx status FIFO overrun\n", 2437 dev->name); 2438 } 2439 np->stats.rx_fifo_errors++; 2440 } 2441 /* Hmmmmm, it's not clear how to recover from PCI faults. 
*/ 2442 if (intr_status & IntrPCIErr) { 2443 printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, 2444 intr_status & IntrPCIErr); 2445 np->stats.tx_fifo_errors++; 2446 np->stats.rx_fifo_errors++; 2447 } 2448 spin_unlock(&np->lock); 2449} 2450 2451static void __get_stats(struct net_device *dev) 2452{ 2453 void __iomem * ioaddr = ns_ioaddr(dev); 2454 struct netdev_private *np = netdev_priv(dev); 2455 2456 /* The chip only need report frame silently dropped. */ 2457 np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); 2458 np->stats.rx_missed_errors += readl(ioaddr + RxMissed); 2459} 2460 2461static struct net_device_stats *get_stats(struct net_device *dev) 2462{ 2463 struct netdev_private *np = netdev_priv(dev); 2464 2465 /* The chip only need report frame silently dropped. */ 2466 spin_lock_irq(&np->lock); 2467 if (netif_running(dev) && !np->hands_off) 2468 __get_stats(dev); 2469 spin_unlock_irq(&np->lock); 2470 2471 return &np->stats; 2472} 2473 2474#ifdef CONFIG_NET_POLL_CONTROLLER 2475static void natsemi_poll_controller(struct net_device *dev) 2476{ 2477 disable_irq(dev->irq); 2478 intr_handler(dev->irq, dev); 2479 enable_irq(dev->irq); 2480} 2481#endif 2482 2483#define HASH_TABLE 0x200 2484static void __set_rx_mode(struct net_device *dev) 2485{ 2486 void __iomem * ioaddr = ns_ioaddr(dev); 2487 struct netdev_private *np = netdev_priv(dev); 2488 u8 mc_filter[64]; /* Multicast hash filter */ 2489 u32 rx_mode; 2490 2491 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ 2492 rx_mode = RxFilterEnable | AcceptBroadcast 2493 | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys; 2494 } else if ((dev->mc_count > multicast_filter_limit) 2495 || (dev->flags & IFF_ALLMULTI)) { 2496 rx_mode = RxFilterEnable | AcceptBroadcast 2497 | AcceptAllMulticast | AcceptMyPhys; 2498 } else { 2499 struct dev_mc_list *mclist; 2500 int i; 2501 memset(mc_filter, 0, sizeof(mc_filter)); 2502 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 2503 i++, mclist = mclist->next) { 2504 int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; 2505 mc_filter[i/8] |= (1 << (i & 0x07)); 2506 } 2507 rx_mode = RxFilterEnable | AcceptBroadcast 2508 | AcceptMulticast | AcceptMyPhys; 2509 for (i = 0; i < 64; i += 2) { 2510 writel(HASH_TABLE + i, ioaddr + RxFilterAddr); 2511 writel((mc_filter[i + 1] << 8) + mc_filter[i], 2512 ioaddr + RxFilterData); 2513 } 2514 } 2515 writel(rx_mode, ioaddr + RxFilterAddr); 2516 np->cur_rx_mode = rx_mode; 2517} 2518 2519static int natsemi_change_mtu(struct net_device *dev, int new_mtu) 2520{ 2521 if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS) 2522 return -EINVAL; 2523 2524 dev->mtu = new_mtu; 2525 2526 /* synchronized against open : rtnl_lock() held by caller */ 2527 if (netif_running(dev)) { 2528 struct netdev_private *np = netdev_priv(dev); 2529 void __iomem * ioaddr = ns_ioaddr(dev); 2530 2531 disable_irq(dev->irq); 2532 spin_lock(&np->lock); 2533 /* stop engines */ 2534 natsemi_stop_rxtx(dev); 2535 /* drain rx queue */ 2536 drain_rx(dev); 2537 /* change buffers */ 2538 set_bufsize(dev); 2539 reinit_rx(dev); 2540 writel(np->ring_dma, ioaddr + RxRingPtr); 2541 /* restart engines */ 2542 writel(RxOn | TxOn, ioaddr + ChipCmd); 2543 spin_unlock(&np->lock); 2544 enable_irq(dev->irq); 2545 } 2546 return 0; 2547} 2548 2549static void set_rx_mode(struct net_device *dev) 2550{ 2551 struct netdev_private *np = netdev_priv(dev); 2552 spin_lock_irq(&np->lock); 2553 if (!np->hands_off) 2554 __set_rx_mode(dev); 
2555 spin_unlock_irq(&np->lock); 2556} 2557 2558static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2559{ 2560 struct netdev_private *np = netdev_priv(dev); 2561 strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN); 2562 strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN); 2563 strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN); 2564} 2565 2566static int get_regs_len(struct net_device *dev) 2567{ 2568 return NATSEMI_REGS_SIZE; 2569} 2570 2571static int get_eeprom_len(struct net_device *dev) 2572{ 2573 struct netdev_private *np = netdev_priv(dev); 2574 return np->eeprom_size; 2575} 2576 2577static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2578{ 2579 struct netdev_private *np = netdev_priv(dev); 2580 spin_lock_irq(&np->lock); 2581 netdev_get_ecmd(dev, ecmd); 2582 spin_unlock_irq(&np->lock); 2583 return 0; 2584} 2585 2586static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2587{ 2588 struct netdev_private *np = netdev_priv(dev); 2589 int res; 2590 spin_lock_irq(&np->lock); 2591 res = netdev_set_ecmd(dev, ecmd); 2592 spin_unlock_irq(&np->lock); 2593 return res; 2594} 2595 2596static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2597{ 2598 struct netdev_private *np = netdev_priv(dev); 2599 spin_lock_irq(&np->lock); 2600 netdev_get_wol(dev, &wol->supported, &wol->wolopts); 2601 netdev_get_sopass(dev, wol->sopass); 2602 spin_unlock_irq(&np->lock); 2603} 2604 2605static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2606{ 2607 struct netdev_private *np = netdev_priv(dev); 2608 int res; 2609 spin_lock_irq(&np->lock); 2610 netdev_set_wol(dev, wol->wolopts); 2611 res = netdev_set_sopass(dev, wol->sopass); 2612 spin_unlock_irq(&np->lock); 2613 return res; 2614} 2615 2616static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 2617{ 2618 struct netdev_private *np = netdev_priv(dev); 2619 regs->version = NATSEMI_REGS_VER; 
2620 spin_lock_irq(&np->lock); 2621 netdev_get_regs(dev, buf); 2622 spin_unlock_irq(&np->lock); 2623} 2624 2625static u32 get_msglevel(struct net_device *dev) 2626{ 2627 struct netdev_private *np = netdev_priv(dev); 2628 return np->msg_enable; 2629} 2630 2631static void set_msglevel(struct net_device *dev, u32 val) 2632{ 2633 struct netdev_private *np = netdev_priv(dev); 2634 np->msg_enable = val; 2635} 2636 2637static int nway_reset(struct net_device *dev) 2638{ 2639 int tmp; 2640 int r = -EINVAL; 2641 /* if autoneg is off, it's an error */ 2642 tmp = mdio_read(dev, MII_BMCR); 2643 if (tmp & BMCR_ANENABLE) { 2644 tmp |= (BMCR_ANRESTART); 2645 mdio_write(dev, MII_BMCR, tmp); 2646 r = 0; 2647 } 2648 return r; 2649} 2650 2651static u32 get_link(struct net_device *dev) 2652{ 2653 /* LSTATUS is latched low until a read - so read twice */ 2654 mdio_read(dev, MII_BMSR); 2655 return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0; 2656} 2657 2658static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 2659{ 2660 struct netdev_private *np = netdev_priv(dev); 2661 u8 *eebuf; 2662 int res; 2663 2664 eebuf = kmalloc(np->eeprom_size, GFP_KERNEL); 2665 if (!eebuf) 2666 return -ENOMEM; 2667 2668 eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16); 2669 spin_lock_irq(&np->lock); 2670 res = netdev_get_eeprom(dev, eebuf); 2671 spin_unlock_irq(&np->lock); 2672 if (!res) 2673 memcpy(data, eebuf+eeprom->offset, eeprom->len); 2674 kfree(eebuf); 2675 return res; 2676} 2677 2678static const struct ethtool_ops ethtool_ops = { 2679 .get_drvinfo = get_drvinfo, 2680 .get_regs_len = get_regs_len, 2681 .get_eeprom_len = get_eeprom_len, 2682 .get_settings = get_settings, 2683 .set_settings = set_settings, 2684 .get_wol = get_wol, 2685 .set_wol = set_wol, 2686 .get_regs = get_regs, 2687 .get_msglevel = get_msglevel, 2688 .set_msglevel = set_msglevel, 2689 .nway_reset = nway_reset, 2690 .get_link = get_link, 2691 .get_eeprom = get_eeprom, 2692}; 2693 
/* Translate ethtool WAKE_* flags into the chip's WOLCmd bits and
 * program them.  WakeMagicSecure is only honoured on silicon revision
 * D and later.  Caller holds np->lock. */
static int netdev_set_wol(struct net_device *dev, u32 newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

	/* translate to bitmasks this chip understands */
	if (newval & WAKE_PHY)
		data |= WakePhy;
	if (newval & WAKE_UCAST)
		data |= WakeUnicast;
	if (newval & WAKE_MCAST)
		data |= WakeMulticast;
	if (newval & WAKE_BCAST)
		data |= WakeBroadcast;
	if (newval & WAKE_ARP)
		data |= WakeArp;
	if (newval & WAKE_MAGIC)
		data |= WakeMagic;
	if (np->srr >= SRR_DP83815_D) {
		if (newval & WAKE_MAGICSECURE) {
			data |= WakeMagicSecure;
		}
	}

	writel(data, ioaddr + WOLCmd);

	return 0;
}

/* Report supported and currently-enabled wake options, translating the
 * chip's WOLCmd bits back into ethtool WAKE_* flags.  Caller holds
 * np->lock. */
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 regval = readl(ioaddr + WOLCmd);

	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
			| WAKE_ARP | WAKE_MAGIC);

	if (np->srr >= SRR_DP83815_D) {
		/* SOPASS works on revD and higher */
		*supported |= WAKE_MAGICSECURE;
	}
	*cur = 0;

	/* translate from chip bitmasks */
	if (regval & WakePhy)
		*cur |= WAKE_PHY;
	if (regval & WakeUnicast)
		*cur |= WAKE_UCAST;
	if (regval & WakeMulticast)
		*cur |= WAKE_MCAST;
	if (regval & WakeBroadcast)
		*cur |= WAKE_BCAST;
	if (regval & WakeArp)
		*cur |= WAKE_ARP;
	if (regval & WakeMagic)
		*cur |= WAKE_MAGIC;
	if (regval & WakeMagicSecure) {
		/* this can be on in revC, but it's broken */
		*cur |= WAKE_MAGICSECURE;
	}

	return 0;
}

/* Write the 6-byte SecureOn password into the undocumented RFCR
 * locations 0xa/0xc/0xe.  Silently a no-op before revision D.
 * Caller holds np->lock.
 * NOTE(review): 'newval' is reinterpreted as three little-endian u16
 * words - assumes the buffer is at least 6 bytes (ethtool sopass is). */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		return 0;
	}

	/* enable writing to these registers by disabling the RX filter */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

	/* re-enable the RX filter */
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}

/* Read the SecureOn password back from the RFCR locations; returns
 * zeros on pre-D silicon.  Caller holds np->lock. */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}

/* Fill an ethtool_cmd from the driver's cached settings, resolving the
 * active speed/duplex from MII negotiation when autoneg is enabled.
 * Caller holds np->lock. */
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	ecmd->port        = dev->if_port;
	ecmd->speed       = np->speed;
	ecmd->duplex      = np->duplex;
	ecmd->autoneg     = np->autoneg;
	ecmd->advertising = 0;
	if (np->advertising & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	ecmd->supported   = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->phy_address = np->phy_addr_external;

	/* set information based on active port type */
	switch (ecmd->port) {
	default:
	case PORT_TP:
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->transceiver = XCVR_INTERNAL;
		break;
	case PORT_MII:
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	case PORT_FIBRE:
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	}

	/* if autonegotiation is on, try to return the active speed/duplex */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->speed  = SPEED_100;
		else
			ecmd->speed  = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	}

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}

/* Validate and apply new link settings: select the port/PHY, update the
 * advertised modes or forced speed/duplex, then reprogram the PHY and
 * re-evaluate the link.  Caller holds np->lock. */
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);

	if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transciever are really not going to work so don't let the
	 * user select them.
	 */
	if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
			       ecmd->port == PORT_TP))
		return -EINVAL;

	/*
	 * maxtxpkt, maxrxpkt: ignored for now.
	 *
	 * transceiver:
	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
	 * selects based on ecmd->port.
	 *
	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
	 * phys that are connected to the mii bus. It's used to apply fibre
	 * specific updates.
	 */

	/* WHEW! now lets bang some bits */

	/* save the parms */
	dev->if_port          = ecmd->port;
	np->autoneg           = ecmd->autoneg;
	np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed  = ecmd->speed;
		np->duplex = ecmd->duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

	/* get the right phy enabled */
	if (ecmd->port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}

/* Dump chip registers, current MII registers, the 'magic' page-1
 * registers and the RFCR-indexed filter RAM into buf.  Caller holds
 * np->lock; buf must be NATSEMI_REGS_SIZE bytes. */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}

/* Reverse the bit order of a 16-bit value (EEPROM words are stored
 * bit-swapped on the wire). */
#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )

/* Read the whole EEPROM into buf, re-swapping each word so userland
 * sees the raw stored representation.  Caller holds np->lock. */
static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	int i;
	u16 *ebuf = (u16 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	/* eeprom_read reads 16 bits, and indexes by 16 bits */
	for (i = 0; i < np->eeprom_size/2; i++) {
		ebuf[i] = eeprom_read(ioaddr, i);
		/* The EEPROM itself stores data bit-swapped, but eeprom_read
		 * reads it back "sanely". So we swap it back here in order to
		 * present it to userland as it is stored. */
		ebuf[i] = SWAP_BITS(ebuf[i]);
	}
	return 0;
}

/* ioctl entry point: MII PHY read/write.  On PORT_TP only the external
 * PHY address is answered (reads of other ids return 0); otherwise the
 * internal PHY is moved out of the way and the mii bus is accessed
 * directly.  Writes to MII_ADVERTISE are mirrored into np->advertising. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
		data->phy_id = np->phy_addr_external;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
							data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
							data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f,
						data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Put the chip into Wake-on-LAN mode: silent Rx (NULL ring pointer),
 * PME enabled, and optionally the WOL/link-change interrupts.  The nic
 * must already be stopped. */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		natsemi_irq_enable(dev);
	}
}

/* net_device stop entry point: quiesce the hardware, free the IRQ and
 * rings, and either arm WOL mode or restore the saved PME state.
 * rtnl_lock held by caller. */
static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	/* Stop new activity first: timer, then IRQs (hands_off makes the
	 * handler a no-op while we release the irq line). */
	del_timer_sync(&np->timer);
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	free_irq(dev->irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the driver are disabled.
	 */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}


/* PCI removal: unregister, release BAR resources and free the netdev. */
static void __devexit natsemi_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
	unregister_netdev (dev);
	pci_release_regions (pdev);
	iounmap(ioaddr);
	free_netdev (dev);
	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM

/*
 * The ns83815 chip doesn't have explicit RxStop bits.
 * Kicking the Rx or Tx process for a new packet reenables the Rx process
 * of the nic, thus this function must be very careful:
 *
 * suspend/resume synchronization:
 * entry points:
 * 	netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
 * 	start_tx, tx_timeout
 *
 * No function accesses the hardware without checking np->hands_off.
 * 	the check occurs under spin_lock_irq(&np->lock);
 * exceptions:
 * 	* netdev_ioctl: noncritical access.
 * 	* netdev_open: cannot happen due to the device_detach
 * 	* netdev_close: doesn't hurt.
 * 	* netdev_timer: timer stopped by natsemi_suspend.
 * 	* intr_handler: doesn't acquire the spinlock. suspend calls
 * 		disable_irq() to enforce synchronization.
 * 	* natsemi_poll: checks before reenabling interrupts.
suspend 3228 * sets hands_off, disables interrupts and then waits with 3229 * netif_poll_disable(). 3230 * 3231 * Interrupts must be disabled, otherwise hands_off can cause irq storms. 3232 */ 3233 3234static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) 3235{ 3236 struct net_device *dev = pci_get_drvdata (pdev); 3237 struct netdev_private *np = netdev_priv(dev); 3238 void __iomem * ioaddr = ns_ioaddr(dev); 3239 3240 rtnl_lock(); 3241 if (netif_running (dev)) { 3242 del_timer_sync(&np->timer); 3243 3244 disable_irq(dev->irq); 3245 spin_lock_irq(&np->lock); 3246 3247 natsemi_irq_disable(dev); 3248 np->hands_off = 1; 3249 natsemi_stop_rxtx(dev); 3250 netif_stop_queue(dev); 3251 3252 spin_unlock_irq(&np->lock); 3253 enable_irq(dev->irq); 3254 3255 netif_poll_disable(dev); 3256 3257 /* Update the error counts. */ 3258 __get_stats(dev); 3259 3260 /* pci_power_off(pdev, -1); */ 3261 drain_ring(dev); 3262 { 3263 u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary; 3264 /* Restore PME enable bit */ 3265 if (wol) { 3266 enable_wol_mode(dev, 0); 3267 } else { 3268 /* Restore PME enable bit unmolested */ 3269 writel(np->SavedClkRun, ioaddr + ClkRun); 3270 } 3271 } 3272 } 3273 netif_device_detach(dev); 3274 rtnl_unlock(); 3275 return 0; 3276} 3277 3278 3279static int natsemi_resume (struct pci_dev *pdev) 3280{ 3281 struct net_device *dev = pci_get_drvdata (pdev); 3282 struct netdev_private *np = netdev_priv(dev); 3283 3284 rtnl_lock(); 3285 if (netif_device_present(dev)) 3286 goto out; 3287 if (netif_running(dev)) { 3288 BUG_ON(!np->hands_off); 3289 pci_enable_device(pdev); 3290 /* pci_power_on(pdev); */ 3291 3292 natsemi_reset(dev); 3293 init_ring(dev); 3294 disable_irq(dev->irq); 3295 spin_lock_irq(&np->lock); 3296 np->hands_off = 0; 3297 init_registers(dev); 3298 netif_device_attach(dev); 3299 spin_unlock_irq(&np->lock); 3300 enable_irq(dev->irq); 3301 3302 mod_timer(&np->timer, jiffies + 1*HZ); 3303 } 3304 netif_device_attach(dev); 3305 
netif_poll_enable(dev); 3306out: 3307 rtnl_unlock(); 3308 return 0; 3309} 3310 3311#endif /* CONFIG_PM */ 3312 3313static struct pci_driver natsemi_driver = { 3314 .name = DRV_NAME, 3315 .id_table = natsemi_pci_tbl, 3316 .probe = natsemi_probe1, 3317 .remove = __devexit_p(natsemi_remove1), 3318#ifdef CONFIG_PM 3319 .suspend = natsemi_suspend, 3320 .resume = natsemi_resume, 3321#endif 3322}; 3323 3324static int __init natsemi_init_mod (void) 3325{ 3326/* when a module, this is printed whether or not devices are found in probe */ 3327#ifdef MODULE 3328 printk(version); 3329#endif 3330 3331 return pci_register_driver(&natsemi_driver); 3332} 3333 3334static void __exit natsemi_exit_mod (void) 3335{ 3336 pci_unregister_driver (&natsemi_driver); 3337} 3338 3339module_init(natsemi_init_mod); 3340module_exit(natsemi_exit_mod); 3341