/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION	"0.64"
#define DRV_NAME		"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#define dprintk(x...)	do { } while (0)

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000
#define DEV_NEED_LOW_POWER_FIX     0x0800000
#define DEV_NEED_MSI_FIX           0x1000000
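/*
 * Illustration (values taken from the definitions above): the combined
 * statistics flags are plain bitwise ORs of the individual ones,
 *
 *	DEV_HAS_STATISTICS_V12  == DEV_HAS_STATISTICS_V1 | DEV_HAS_STATISTICS_V2
 *	                        == 0x0000200 | 0x0000400 == 0x0000600
 *
 * and the rest of the driver tests them against the per-board driver_data
 * word, e.g. "if (np->driver_data & DEV_HAS_STATISTICS_V2) ...".
 */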
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};
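/*
 * All registers in the enum above are offsets into the memory-mapped BAR.
 * Usage sketch, matching the access pattern used throughout this file
 * (get_hwbase() is defined further down):
 *
 *	u8 __iomem *base = get_hwbase(dev);
 *	u32 events = readl(base + NvRegIrqStatus);
 *	writel(events, base + NvRegIrqStatus);	(writing set bits back acks them)
 */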
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
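/*
 * Sketch of the flaglen layout defined above: the low bits carry the
 * buffer/packet length, the high bits the NV_TX / NV_RX flags, so for a
 * version 1 descriptor
 *
 *	len   = le32_to_cpu(desc->flaglen) & LEN_MASK_V1;
 *	flags = le32_to_cpu(desc->flaglen) & FLAG_MASK_V1;
 *
 * nv_descr_getlength() further down implements exactly this masking.
 */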
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE  0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD       4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
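/*
 * Worked out from the struct above: nv_ethtool_stats contains 33 u64
 * counters in total, so NV_DEV_STATISTICS_V3_COUNT is 33; V2 drops the
 * three version-3 counters (30) and V1 additionally drops the six
 * version-2 counters (24).
 */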
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};
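/*
 * Rough sketch of how the table above is meant to be consumed by the
 * ethtool register self-test further down in the driver: each listed
 * register is written with test patterns and read back, comparing only
 * the bits covered by mask (the zero-terminated entry ends the loop):
 *
 *	for (i = 0; nv_registers_test[i].reg != 0; i++) {
 *		u32 orig = readl(base + nv_registers_test[i].reg);
 *		writel(orig ^ nv_registers_test[i].mask, base + nv_registers_test[i].reg);
 *		... read back, compare under mask, restore orig ...
 *	}
 */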
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx    */
	char name_tx[IFNAMSIZ + 3];	/* -tx    */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is computed as [(time_in_micro_secs * 100) / (2^10)].
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down = 0;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk("%s", msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
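/*
 * Typical reg_delay() use (taken from nv_stop_tx() further down): busy-wait
 * for a status bit to clear, giving up after delaymax usec.  It returns 0
 * once (readl(base + offset) & mask) == target, and 1 on timeout:
 *
 *	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
 *		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
 *		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 */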
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
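/*
 * Worked example: for a 64-bit dma_addr_t 0x0000001234567890, dma_high()
 * yields 0x00000012 and dma_low() yields 0x34567890.  The double shift in
 * dma_high() avoids an undefined full-width shift when dma_addr_t is only
 * 32 bits wide; in that case it simply returns 0.
 */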
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
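/*
 * Layout note for setup_hw_rings() above: rx and tx descriptors share one
 * contiguous DMA allocation with the rx ring first, so the tx ring starts
 * at ring_addr + rx_ring_size * sizeof(descriptor).  With the defaults
 * (RX_RING_DEFAULT = 512, 8-byte struct ring_desc) that is ring_addr + 4096.
 */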
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
			dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
			dev->name, miireg, addr, retval);
	}

	return retval;
}
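/*
 * mii_rw() usage (as in the phy code below): pass MII_READ as the value to
 * read a register, anything else to write it.  Reads return the register
 * contents or -1 on error/timeout; writes return 0 on success, -1 on
 * failure:
 *
 *	int bmsr = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
 *		return -1;	(write failed)
 */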
pci_name(np->pci_dev)); 1219 return PHY_ERROR; 1220 } 1221 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1222 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1223 return PHY_ERROR; 1224 } 1225 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1226 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1227 return PHY_ERROR; 1228 } 1229 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1230 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1231 return PHY_ERROR; 1232 } 1233 } 1234 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1235 np->phy_rev == PHY_REV_REALTEK_8211C) { 1236 u32 powerstate = readl(base + NvRegPowerState2); 1237 1238 /* need to perform hw phy reset */ 1239 powerstate |= NVREG_POWERSTATE2_PHY_RESET; 1240 writel(powerstate, base + NvRegPowerState2); 1241 msleep(25); 1242 1243 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET; 1244 writel(powerstate, base + NvRegPowerState2); 1245 msleep(25); 1246 1247 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1248 reg |= PHY_REALTEK_INIT9; 1249 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) { 1250 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1251 return PHY_ERROR; 1252 } 1253 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) { 1254 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1255 return PHY_ERROR; 1256 } 1257 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); 1258 if (!(reg & PHY_REALTEK_INIT11)) { 1259 reg |= PHY_REALTEK_INIT11; 1260 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) { 1261 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1262 return PHY_ERROR; 1263 } 1264 } 1265 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1266 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1267 return PHY_ERROR; 1268 } 1269 } 1270 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1271 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { 1272 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1273 phy_reserved |= PHY_REALTEK_INIT7; 1274 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1275 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1276 return PHY_ERROR; 1277 } 1278 } 1279 } 1280 } 1281 1282 /* set advertise register */ 1283 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1284 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1285 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1286 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1287 return PHY_ERROR; 1288 } 1289 1290 /* get phy interface type */ 1291 phyinterface = readl(base + NvRegPhyInterface); 1292 1293 /* see if gigabit phy */ 1294 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1295 if (mii_status & PHY_GIGABIT) { 1296 np->gigabit = PHY_GIGABIT; 1297 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1298 mii_control_1000 &= ~ADVERTISE_1000HALF; 1299 if (phyinterface & PHY_RGMII) 1300 mii_control_1000 |= ADVERTISE_1000FULL; 1301 else 1302 mii_control_1000 &= ~ADVERTISE_1000FULL; 1303 1304 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1305 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1306 return PHY_ERROR; 1307 } 1308 } 1309 else 1310 np->gigabit = 0; 1311 
1312 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1313 mii_control |= BMCR_ANENABLE; 1314 1315 if (np->phy_oui == PHY_OUI_REALTEK && 1316 np->phy_model == PHY_MODEL_REALTEK_8211 && 1317 np->phy_rev == PHY_REV_REALTEK_8211C) { 1318 /* start autoneg since we already performed hw reset above */ 1319 mii_control |= BMCR_ANRESTART; 1320 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1321 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev)); 1322 return PHY_ERROR; 1323 } 1324 } else { 1325 /* reset the phy 1326 * (certain phys need bmcr to be setup with reset) 1327 */ 1328 if (phy_reset(dev, mii_control)) { 1329 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1330 return PHY_ERROR; 1331 } 1332 } 1333 1334 /* phy vendor specific configuration */ 1335 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1336 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1337 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1338 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1339 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { 1340 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1341 return PHY_ERROR; 1342 } 1343 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1344 phy_reserved |= PHY_CICADA_INIT5; 1345 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1346 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1347 return PHY_ERROR; 1348 } 1349 } 1350 if (np->phy_oui == PHY_OUI_CICADA) { 1351 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); 1352 phy_reserved |= PHY_CICADA_INIT6; 1353 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { 1354 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1355 return PHY_ERROR; 1356 } 1357 } 1358 if (np->phy_oui == PHY_OUI_VITESSE) { 1359 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { 1360 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1361 return PHY_ERROR; 1362 } 1363 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { 1364 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1365 return PHY_ERROR; 1366 } 1367 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1368 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1369 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1370 return PHY_ERROR; 1371 } 1372 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1373 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1374 phy_reserved |= PHY_VITESSE_INIT3; 1375 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1376 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1377 return PHY_ERROR; 1378 } 1379 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { 1380 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1381 return PHY_ERROR; 1382 } 1383 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { 1384 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1385 return PHY_ERROR; 1386 } 1387 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1388 phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1389 phy_reserved |= PHY_VITESSE_INIT3; 1390 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1391 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1392 return PHY_ERROR; 1393 } 1394 
phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1395 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1396 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1397 return PHY_ERROR; 1398 } 1399 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1400 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1401 return PHY_ERROR; 1402 } 1403 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1404 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1405 return PHY_ERROR; 1406 } 1407 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1408 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1409 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1410 return PHY_ERROR; 1411 } 1412 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1413 phy_reserved &= ~PHY_VITESSE_INIT_MSK2; 1414 phy_reserved |= PHY_VITESSE_INIT8; 1415 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1416 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1417 return PHY_ERROR; 1418 } 1419 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { 1420 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1421 return PHY_ERROR; 1422 } 1423 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { 1424 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1425 return PHY_ERROR; 1426 } 1427 } 1428 if (np->phy_oui == PHY_OUI_REALTEK) { 1429 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1430 np->phy_rev == PHY_REV_REALTEK_8211B) { 1431 /* reset could have cleared these out, set them back */ 1432 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1433 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1434 return PHY_ERROR; 1435 } 1436 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1437 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1438 return PHY_ERROR; 1439 } 1440 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1441 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1442 return PHY_ERROR; 1443 } 1444 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1445 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1446 return PHY_ERROR; 1447 } 1448 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1449 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1450 return PHY_ERROR; 1451 } 1452 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1453 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1454 return PHY_ERROR; 1455 } 1456 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1457 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1458 return PHY_ERROR; 1459 } 1460 } 1461 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1462 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { 1463 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); 1464 phy_reserved |= PHY_REALTEK_INIT7; 1465 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) { 1466 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1467 return PHY_ERROR; 1468 } 1469 } 1470 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { 1471 if (mii_rw(dev, np->phyaddr, 
PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1472 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1473 return PHY_ERROR; 1474 } 1475 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 1476 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 1477 phy_reserved |= PHY_REALTEK_INIT3; 1478 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) { 1479 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1480 return PHY_ERROR; 1481 } 1482 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1483 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1484 return PHY_ERROR; 1485 } 1486 } 1487 } 1488 } 1489 1490 /* some phys clear out pause advertisment on reset, set it back */ 1491 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1492 1493 /* restart auto negotiation, power down phy */ 1494 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1495 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1496 if (phy_power_down) { 1497 mii_control |= BMCR_PDOWN; 1498 } 1499 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1500 return PHY_ERROR; 1501 } 1502 1503 return 0; 1504} 1505 1506static void nv_start_rx(struct net_device *dev) 1507{ 1508 struct fe_priv *np = netdev_priv(dev); 1509 u8 __iomem *base = get_hwbase(dev); 1510 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1511 1512 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1513 /* Already running? Stop it. */ 1514 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1515 rx_ctrl &= ~NVREG_RCVCTL_START; 1516 writel(rx_ctrl, base + NvRegReceiverControl); 1517 pci_push(base); 1518 } 1519 writel(np->linkspeed, base + NvRegLinkSpeed); 1520 pci_push(base); 1521 rx_ctrl |= NVREG_RCVCTL_START; 1522 if (np->mac_in_use) 1523 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1524 writel(rx_ctrl, base + NvRegReceiverControl); 1525 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1526 dev->name, np->duplex, np->linkspeed); 1527 pci_push(base); 1528} 1529 1530static void nv_stop_rx(struct net_device *dev) 1531{ 1532 struct fe_priv *np = netdev_priv(dev); 1533 u8 __iomem *base = get_hwbase(dev); 1534 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1535 1536 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1537 if (!np->mac_in_use) 1538 rx_ctrl &= ~NVREG_RCVCTL_START; 1539 else 1540 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1541 writel(rx_ctrl, base + NvRegReceiverControl); 1542 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1543 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1544 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1545 1546 udelay(NV_RXSTOP_DELAY2); 1547 if (!np->mac_in_use) 1548 writel(0, base + NvRegLinkSpeed); 1549} 1550 1551static void nv_start_tx(struct net_device *dev) 1552{ 1553 struct fe_priv *np = netdev_priv(dev); 1554 u8 __iomem *base = get_hwbase(dev); 1555 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1556 1557 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1558 tx_ctrl |= NVREG_XMITCTL_START; 1559 if (np->mac_in_use) 1560 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1561 writel(tx_ctrl, base + NvRegTransmitterControl); 1562 pci_push(base); 1563} 1564 1565static void nv_stop_tx(struct net_device *dev) 1566{ 1567 struct fe_priv *np = netdev_priv(dev); 1568 u8 __iomem *base = get_hwbase(dev); 1569 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1570 1571 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1572 if (!np->mac_in_use) 1573 tx_ctrl &= ~NVREG_XMITCTL_START; 1574 else 1575 
tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1576 writel(tx_ctrl, base + NvRegTransmitterControl); 1577 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1578 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1579 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1580 1581 udelay(NV_TXSTOP_DELAY2); 1582 if (!np->mac_in_use) 1583 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, 1584 base + NvRegTransmitPoll); 1585} 1586 1587static void nv_start_rxtx(struct net_device *dev) 1588{ 1589 nv_start_rx(dev); 1590 nv_start_tx(dev); 1591} 1592 1593static void nv_stop_rxtx(struct net_device *dev) 1594{ 1595 nv_stop_rx(dev); 1596 nv_stop_tx(dev); 1597} 1598 1599static void nv_txrx_reset(struct net_device *dev) 1600{ 1601 struct fe_priv *np = netdev_priv(dev); 1602 u8 __iomem *base = get_hwbase(dev); 1603 1604 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 1605 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1606 pci_push(base); 1607 udelay(NV_TXRX_RESET_DELAY); 1608 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1609 pci_push(base); 1610} 1611 1612static void nv_mac_reset(struct net_device *dev) 1613{ 1614 struct fe_priv *np = netdev_priv(dev); 1615 u8 __iomem *base = get_hwbase(dev); 1616 u32 temp1, temp2, temp3; 1617 1618 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1619 1620 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1621 pci_push(base); 1622 1623 /* save registers since they will be cleared on reset */ 1624 temp1 = readl(base + NvRegMacAddrA); 1625 temp2 = readl(base + NvRegMacAddrB); 1626 temp3 = readl(base + NvRegTransmitPoll); 1627 1628 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1629 pci_push(base); 1630 udelay(NV_MAC_RESET_DELAY); 1631 writel(0, base + NvRegMacReset); 1632 pci_push(base); 1633 udelay(NV_MAC_RESET_DELAY); 1634 1635 /* restore saved registers */ 1636 writel(temp1, base + NvRegMacAddrA); 1637 writel(temp2, base + NvRegMacAddrB); 1638 writel(temp3, base + NvRegTransmitPoll); 1639 1640 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1641 pci_push(base); 1642} 1643 1644static void nv_get_hw_stats(struct net_device *dev) 1645{ 1646 struct fe_priv *np = netdev_priv(dev); 1647 u8 __iomem *base = get_hwbase(dev); 1648 1649 np->estats.tx_bytes += readl(base + NvRegTxCnt); 1650 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); 1651 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); 1652 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); 1653 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); 1654 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); 1655 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); 1656 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); 1657 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); 1658 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); 1659 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); 1660 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); 1661 np->estats.rx_runt += readl(base + NvRegRxRunt); 1662 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); 1663 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); 1664 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); 1665 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); 1666 np->estats.rx_length_error += readl(base + NvRegRxLenErr); 1667 
np->estats.rx_unicast += readl(base + NvRegRxUnicast); 1668 np->estats.rx_multicast += readl(base + NvRegRxMulticast); 1669 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); 1670 np->estats.rx_packets = 1671 np->estats.rx_unicast + 1672 np->estats.rx_multicast + 1673 np->estats.rx_broadcast; 1674 np->estats.rx_errors_total = 1675 np->estats.rx_crc_errors + 1676 np->estats.rx_over_errors + 1677 np->estats.rx_frame_error + 1678 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + 1679 np->estats.rx_late_collision + 1680 np->estats.rx_runt + 1681 np->estats.rx_frame_too_long; 1682 np->estats.tx_errors_total = 1683 np->estats.tx_late_collision + 1684 np->estats.tx_fifo_errors + 1685 np->estats.tx_carrier_errors + 1686 np->estats.tx_excess_deferral + 1687 np->estats.tx_retry_error; 1688 1689 if (np->driver_data & DEV_HAS_STATISTICS_V2) { 1690 np->estats.tx_deferral += readl(base + NvRegTxDef); 1691 np->estats.tx_packets += readl(base + NvRegTxFrame); 1692 np->estats.rx_bytes += readl(base + NvRegRxCnt); 1693 np->estats.tx_pause += readl(base + NvRegTxPause); 1694 np->estats.rx_pause += readl(base + NvRegRxPause); 1695 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1696 } 1697 1698 if (np->driver_data & DEV_HAS_STATISTICS_V3) { 1699 np->estats.tx_unicast += readl(base + NvRegTxUnicast); 1700 np->estats.tx_multicast += readl(base + NvRegTxMulticast); 1701 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); 1702 } 1703} 1704 1705/* 1706 * nv_get_stats: dev->get_stats function 1707 * Get latest stats value from the nic. 1708 * Called with read_lock(&dev_base_lock) held for read - 1709 * only synchronized against unregister_netdevice. 1710 */ 1711static struct net_device_stats *nv_get_stats(struct net_device *dev) 1712{ 1713 struct fe_priv *np = netdev_priv(dev); 1714 1715 /* If the nic supports hw counters then retrieve latest values */ 1716 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { 1717 nv_get_hw_stats(dev); 1718 1719 /* copy to net_device stats */ 1720 dev->stats.tx_bytes = np->estats.tx_bytes; 1721 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1722 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1723 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1724 dev->stats.rx_over_errors = np->estats.rx_over_errors; 1725 dev->stats.rx_errors = np->estats.rx_errors_total; 1726 dev->stats.tx_errors = np->estats.tx_errors_total; 1727 } 1728 1729 return &dev->stats; 1730} 1731 1732/* 1733 * nv_alloc_rx: fill rx ring entries. 
1734 * Return 1 if the allocations for the skbs failed and the 1735 * rx engine is without available descriptors 1736 */ 1737static int nv_alloc_rx(struct net_device *dev) 1738{ 1739 struct fe_priv *np = netdev_priv(dev); 1740 struct ring_desc* less_rx; 1741 1742 less_rx = np->get_rx.orig; 1743 if (less_rx-- == np->first_rx.orig) 1744 less_rx = np->last_rx.orig; 1745 1746 while (np->put_rx.orig != less_rx) { 1747 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1748 if (skb) { 1749 np->put_rx_ctx->skb = skb; 1750 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1751 skb->data, 1752 skb_tailroom(skb), 1753 PCI_DMA_FROMDEVICE); 1754 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1755 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); 1756 wmb(); 1757 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1758 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) 1759 np->put_rx.orig = np->first_rx.orig; 1760 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1761 np->put_rx_ctx = np->first_rx_ctx; 1762 } else { 1763 return 1; 1764 } 1765 } 1766 return 0; 1767} 1768 1769static int nv_alloc_rx_optimized(struct net_device *dev) 1770{ 1771 struct fe_priv *np = netdev_priv(dev); 1772 struct ring_desc_ex* less_rx; 1773 1774 less_rx = np->get_rx.ex; 1775 if (less_rx-- == np->first_rx.ex) 1776 less_rx = np->last_rx.ex; 1777 1778 while (np->put_rx.ex != less_rx) { 1779 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); 1780 if (skb) { 1781 np->put_rx_ctx->skb = skb; 1782 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, 1783 skb->data, 1784 skb_tailroom(skb), 1785 PCI_DMA_FROMDEVICE); 1786 np->put_rx_ctx->dma_len = skb_tailroom(skb); 1787 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); 1788 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); 1789 wmb(); 1790 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1791 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) 1792 np->put_rx.ex = np->first_rx.ex; 1793 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1794 np->put_rx_ctx = np->first_rx_ctx; 1795 } else { 1796 return 1; 1797 } 1798 } 1799 return 0; 1800} 1801 1802/* If rx bufs are exhausted, called after 50ms to attempt a refill */ 1803static void nv_do_rx_refill(unsigned long data) 1804{ 1805 struct net_device *dev = (struct net_device *) data; 1806 struct fe_priv *np = netdev_priv(dev); 1807 1808 /* Just reschedule NAPI rx processing */ 1809 napi_schedule(&np->napi); 1810} 1811 1812static void nv_init_rx(struct net_device *dev) 1813{ 1814 struct fe_priv *np = netdev_priv(dev); 1815 int i; 1816 1817 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1818 1819 if (!nv_optimized(np)) 1820 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1821 else 1822 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1823 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; 1824 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1825 1826 for (i = 0; i < np->rx_ring_size; i++) { 1827 if (!nv_optimized(np)) { 1828 np->rx_ring.orig[i].flaglen = 0; 1829 np->rx_ring.orig[i].buf = 0; 1830 } else { 1831 np->rx_ring.ex[i].flaglen = 0; 1832 np->rx_ring.ex[i].txvlan = 0; 1833 np->rx_ring.ex[i].bufhigh = 0; 1834 np->rx_ring.ex[i].buflow = 0; 1835 } 1836 np->rx_skb[i].skb = NULL; 1837 np->rx_skb[i].dma = 0; 1838 } 1839} 1840 1841static void nv_init_tx(struct net_device *dev) 1842{ 1843 struct fe_priv *np = netdev_priv(dev); 1844 int i; 1845 1846 np->get_tx = np->put_tx = np->first_tx = 
np->tx_ring; 1847 1848 if (!nv_optimized(np)) 1849 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1850 else 1851 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1852 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; 1853 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; 1854 np->tx_pkts_in_progress = 0; 1855 np->tx_change_owner = NULL; 1856 np->tx_end_flip = NULL; 1857 np->tx_stop = 0; 1858 1859 for (i = 0; i < np->tx_ring_size; i++) { 1860 if (!nv_optimized(np)) { 1861 np->tx_ring.orig[i].flaglen = 0; 1862 np->tx_ring.orig[i].buf = 0; 1863 } else { 1864 np->tx_ring.ex[i].flaglen = 0; 1865 np->tx_ring.ex[i].txvlan = 0; 1866 np->tx_ring.ex[i].bufhigh = 0; 1867 np->tx_ring.ex[i].buflow = 0; 1868 } 1869 np->tx_skb[i].skb = NULL; 1870 np->tx_skb[i].dma = 0; 1871 np->tx_skb[i].dma_len = 0; 1872 np->tx_skb[i].dma_single = 0; 1873 np->tx_skb[i].first_tx_desc = NULL; 1874 np->tx_skb[i].next_tx_ctx = NULL; 1875 } 1876} 1877 1878static int nv_init_ring(struct net_device *dev) 1879{ 1880 struct fe_priv *np = netdev_priv(dev); 1881 1882 nv_init_tx(dev); 1883 nv_init_rx(dev); 1884 1885 if (!nv_optimized(np)) 1886 return nv_alloc_rx(dev); 1887 else 1888 return nv_alloc_rx_optimized(dev); 1889} 1890 1891static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) 1892{ 1893 if (tx_skb->dma) { 1894 if (tx_skb->dma_single) 1895 pci_unmap_single(np->pci_dev, tx_skb->dma, 1896 tx_skb->dma_len, 1897 PCI_DMA_TODEVICE); 1898 else 1899 pci_unmap_page(np->pci_dev, tx_skb->dma, 1900 tx_skb->dma_len, 1901 PCI_DMA_TODEVICE); 1902 tx_skb->dma = 0; 1903 } 1904} 1905 1906static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) 1907{ 1908 nv_unmap_txskb(np, tx_skb); 1909 if (tx_skb->skb) { 1910 dev_kfree_skb_any(tx_skb->skb); 1911 tx_skb->skb = NULL; 1912 return 1; 1913 } 1914 return 0; 1915} 1916 1917static void nv_drain_tx(struct net_device *dev) 1918{ 1919 struct fe_priv *np = netdev_priv(dev); 1920 unsigned int i; 1921 1922 for (i = 0; i < np->tx_ring_size; i++) { 1923 if (!nv_optimized(np)) { 1924 np->tx_ring.orig[i].flaglen = 0; 1925 np->tx_ring.orig[i].buf = 0; 1926 } else { 1927 np->tx_ring.ex[i].flaglen = 0; 1928 np->tx_ring.ex[i].txvlan = 0; 1929 np->tx_ring.ex[i].bufhigh = 0; 1930 np->tx_ring.ex[i].buflow = 0; 1931 } 1932 if (nv_release_txskb(np, &np->tx_skb[i])) 1933 dev->stats.tx_dropped++; 1934 np->tx_skb[i].dma = 0; 1935 np->tx_skb[i].dma_len = 0; 1936 np->tx_skb[i].dma_single = 0; 1937 np->tx_skb[i].first_tx_desc = NULL; 1938 np->tx_skb[i].next_tx_ctx = NULL; 1939 } 1940 np->tx_pkts_in_progress = 0; 1941 np->tx_change_owner = NULL; 1942 np->tx_end_flip = NULL; 1943} 1944 1945static void nv_drain_rx(struct net_device *dev) 1946{ 1947 struct fe_priv *np = netdev_priv(dev); 1948 int i; 1949 1950 for (i = 0; i < np->rx_ring_size; i++) { 1951 if (!nv_optimized(np)) { 1952 np->rx_ring.orig[i].flaglen = 0; 1953 np->rx_ring.orig[i].buf = 0; 1954 } else { 1955 np->rx_ring.ex[i].flaglen = 0; 1956 np->rx_ring.ex[i].txvlan = 0; 1957 np->rx_ring.ex[i].bufhigh = 0; 1958 np->rx_ring.ex[i].buflow = 0; 1959 } 1960 wmb(); 1961 if (np->rx_skb[i].skb) { 1962 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, 1963 (skb_end_pointer(np->rx_skb[i].skb) - 1964 np->rx_skb[i].skb->data), 1965 PCI_DMA_FROMDEVICE); 1966 dev_kfree_skb(np->rx_skb[i].skb); 1967 np->rx_skb[i].skb = NULL; 1968 } 1969 } 1970} 1971 1972static void nv_drain_rxtx(struct net_device *dev) 1973{ 1974 nv_drain_tx(dev); 1975 nv_drain_rx(dev); 1976} 1977 1978static inline u32 
nv_get_empty_tx_slots(struct fe_priv *np) 1979{ /* slots in use = (put_tx_ctx - get_tx_ctx) mod ring size; the remainder is empty */ 1980 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); 1981} 1982 1983static void nv_legacybackoff_reseed(struct net_device *dev) 1984{ 1985 u8 __iomem *base = get_hwbase(dev); 1986 u32 reg; 1987 u32 low; 1988 int tx_status = 0; 1989 1990 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; 1991 get_random_bytes(&low, sizeof(low)); 1992 reg |= low & NVREG_SLOTTIME_MASK; 1993 1994 /* Need to stop tx before change takes effect. 1995 * Caller has already gained np->lock. 1996 */ 1997 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; 1998 if (tx_status) 1999 nv_stop_tx(dev); 2000 nv_stop_rx(dev); 2001 writel(reg, base + NvRegSlotTime); 2002 if (tx_status) 2003 nv_start_tx(dev); 2004 nv_start_rx(dev); 2005} 2006 2007/* Gear Backoff Seeds */ 2008#define BACKOFF_SEEDSET_ROWS 8 2009#define BACKOFF_SEEDSET_LFSRS 15 2010 2011/* Known Good seed sets */ 2012static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2013 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2014 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2015 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2016 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2017 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2018 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2019 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2020 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 2021 2022static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2023 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2024 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2025 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2026 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2027 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2028 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2029 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2030 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 2031 2032static void nv_gear_backoff_reseed(struct net_device *dev) 2033{ 2034 u8 __iomem *base = get_hwbase(dev); 2035 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; 2036 u32 temp, seedset, combinedSeed; 2037 int i; 2038 2039 /* Setup seed for free running LFSR */ 2040 /* Gather three random 12-bit miniseeds 2041 and swizzle bits around to increase randomness */ 2042 get_random_bytes(&miniseed1, sizeof(miniseed1)); 2043 miniseed1 &= 0x0fff; 2044 if (miniseed1 == 0) 2045 miniseed1 = 0xabc; 2046 2047 get_random_bytes(&miniseed2, sizeof(miniseed2)); 2048 miniseed2 &= 0x0fff; 2049 if (miniseed2 == 0) 2050 miniseed2 = 0xabc; 2051 miniseed2_reversed = 2052 ((miniseed2 & 0xF00) >> 8) | 2053 (miniseed2 & 0x0F0) | 2054 ((miniseed2 & 0x00F) << 8); 2055 2056 get_random_bytes(&miniseed3, sizeof(miniseed3)); 2057 miniseed3 &= 0x0fff; 2058 if (miniseed3 == 0) 2059 miniseed3 = 0xabc; 2060 miniseed3_reversed = 2061 ((miniseed3 & 0xF00) >> 8) | 2062 (miniseed3 & 0x0F0) | 2063 ((miniseed3 & 0x00F) << 8); 2064 2065 combinedSeed = 
((miniseed1 ^ miniseed2_reversed) << 12) | 2066 (miniseed2 ^ miniseed3_reversed); 2067 2068 /* Seeds can not be zero */ 2069 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) 2070 combinedSeed |= 0x08; 2071 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) 2072 combinedSeed |= 0x8000; 2073 2074 /* No need to disable tx here */ 2075 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2076 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2077 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2078 writel(temp,base + NvRegBackOffControl); 2079 2080 /* Setup seeds for all gear LFSRs. */ 2081 get_random_bytes(&seedset, sizeof(seedset)); 2082 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2083 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 2084 { 2085 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2086 temp |= main_seedset[seedset][i-1] & 0x3ff; 2087 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2088 writel(temp, base + NvRegBackOffControl); 2089 } 2090} 2091 2092/* 2093 * nv_start_xmit: dev->hard_start_xmit function 2094 * Called with netif_tx_lock held. 2095 */ 2096static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 2097{ 2098 struct fe_priv *np = netdev_priv(dev); 2099 u32 tx_flags = 0; 2100 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 2101 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2102 unsigned int i; 2103 u32 offset = 0; 2104 u32 bcnt; 2105 u32 size = skb_headlen(skb); 2106 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2107 u32 empty_slots; 2108 struct ring_desc* put_tx; 2109 struct ring_desc* start_tx; 2110 struct ring_desc* prev_tx; 2111 struct nv_skb_map* prev_tx_ctx; 2112 unsigned long flags; 2113 2114 /* add fragments to entries count */ 2115 for (i = 0; i < fragments; i++) { 2116 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2117 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2118 } 2119 2120 spin_lock_irqsave(&np->lock, flags); 2121 empty_slots = nv_get_empty_tx_slots(np); 2122 if (unlikely(empty_slots <= entries)) { 2123 netif_stop_queue(dev); 2124 np->tx_stop = 1; 2125 spin_unlock_irqrestore(&np->lock, flags); 2126 return NETDEV_TX_BUSY; 2127 } 2128 spin_unlock_irqrestore(&np->lock, flags); 2129 2130 start_tx = put_tx = np->put_tx.orig; 2131 2132 /* setup the header buffer */ 2133 do { 2134 prev_tx = put_tx; 2135 prev_tx_ctx = np->put_tx_ctx; 2136 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2137 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2138 PCI_DMA_TODEVICE); 2139 np->put_tx_ctx->dma_len = bcnt; 2140 np->put_tx_ctx->dma_single = 1; 2141 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2142 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2143 2144 tx_flags = np->tx_flags; 2145 offset += bcnt; 2146 size -= bcnt; 2147 if (unlikely(put_tx++ == np->last_tx.orig)) 2148 put_tx = np->first_tx.orig; 2149 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2150 np->put_tx_ctx = np->first_tx_ctx; 2151 } while (size); 2152 2153 /* setup the fragments */ 2154 for (i = 0; i < fragments; i++) { 2155 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2156 u32 size = frag->size; 2157 offset = 0; 2158 2159 do { 2160 prev_tx = put_tx; 2161 prev_tx_ctx = np->put_tx_ctx; 2162 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2163 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2164 PCI_DMA_TODEVICE); 2165 np->put_tx_ctx->dma_len = bcnt; 2166 np->put_tx_ctx->dma_single = 0; 2167 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2168 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2169 2170 offset += bcnt; 2171 size -= bcnt; 2172 if (unlikely(put_tx++ == np->last_tx.orig)) 2173 put_tx = np->first_tx.orig; 2174 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2175 np->put_tx_ctx = np->first_tx_ctx; 2176 } while (size); 2177 } 2178 2179 /* set last fragment flag */ 2180 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); 2181 2182 /* save skb in this slot's context area */ 2183 prev_tx_ctx->skb = skb; 2184 2185 if (skb_is_gso(skb)) 2186 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2187 else 2188 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2189 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2190 2191 spin_lock_irqsave(&np->lock, flags); 2192 2193 /* set tx flags */ 2194 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2195 np->put_tx.orig = put_tx; 2196 2197 spin_unlock_irqrestore(&np->lock, flags); 2198 2199 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n", 2200 dev->name, entries, tx_flags_extra); 2201 { 2202 int j; 2203 for (j=0; j<64; j++) { 2204 if ((j%16) == 0) 2205 dprintk("\n%03x:", j); 2206 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2207 } 2208 dprintk("\n"); 2209 } 2210 2211 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2212 return NETDEV_TX_OK; 2213} 2214 2215static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, 2216 struct net_device *dev) 2217{ 2218 struct fe_priv *np = netdev_priv(dev); 2219 u32 tx_flags = 0; 2220 u32 tx_flags_extra; 2221 unsigned int fragments = skb_shinfo(skb)->nr_frags; 2222 unsigned int i; 2223 u32 offset = 0; 2224 u32 bcnt; 2225 u32 size = skb_headlen(skb); 2226 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2227 u32 empty_slots; 2228 struct ring_desc_ex* put_tx; 2229 struct ring_desc_ex* start_tx; 2230 struct ring_desc_ex* prev_tx; 2231 struct nv_skb_map* prev_tx_ctx; 2232 struct nv_skb_map* start_tx_ctx; 2233 unsigned long flags; 2234 2235 /* add fragments to entries count */ 2236 for (i = 0; i < fragments; i++) { 2237 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2238 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2239 } 2240 2241 spin_lock_irqsave(&np->lock, flags); 2242 empty_slots = nv_get_empty_tx_slots(np); 2243 if (unlikely(empty_slots <= entries)) { 2244 netif_stop_queue(dev); 2245 np->tx_stop = 1; 2246 spin_unlock_irqrestore(&np->lock, flags); 2247 return NETDEV_TX_BUSY; 2248 } 2249 spin_unlock_irqrestore(&np->lock, flags); 2250 2251 start_tx = put_tx = np->put_tx.ex; 2252 start_tx_ctx = np->put_tx_ctx; 2253 2254 /* setup the header buffer */ 2255 do { 2256 prev_tx = put_tx; 2257 prev_tx_ctx = np->put_tx_ctx; 2258 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2259 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2260 PCI_DMA_TODEVICE); 2261 np->put_tx_ctx->dma_len = bcnt; 2262 np->put_tx_ctx->dma_single = 1; 2263 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2264 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2265 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2266 2267 tx_flags = NV_TX2_VALID; 2268 offset += bcnt; 2269 size -= bcnt; 2270 if (unlikely(put_tx++ == np->last_tx.ex)) 2271 put_tx = np->first_tx.ex; 2272 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2273 np->put_tx_ctx = np->first_tx_ctx; 2274 } while (size); 2275 2276 /* setup the fragments */ 2277 for (i = 0; i < fragments; i++) { 2278 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2279 u32 size = frag->size; 2280 offset = 0; 2281 2282 do { 2283 prev_tx = put_tx; 2284 prev_tx_ctx = np->put_tx_ctx; 2285 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2286 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2287 PCI_DMA_TODEVICE); 2288 np->put_tx_ctx->dma_len = bcnt; 2289 np->put_tx_ctx->dma_single = 0; 2290 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2291 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2292 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2293 2294 offset += bcnt; 2295 size -= bcnt; 2296 if (unlikely(put_tx++ == np->last_tx.ex)) 2297 put_tx = np->first_tx.ex; 2298 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2299 np->put_tx_ctx = np->first_tx_ctx; 2300 } while (size); 2301 } 2302 2303 /* set last fragment flag */ 2304 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); 2305 2306 /* save skb in this slot's context area */ 2307 prev_tx_ctx->skb = skb; 2308 2309 if (skb_is_gso(skb)) 2310 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 2311 else 2312 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? 2313 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2314 2315 /* vlan tag */ 2316 if (likely(!np->vlangrp)) { 2317 start_tx->txvlan = 0; 2318 } else { 2319 if (vlan_tx_tag_present(skb)) 2320 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb)); 2321 else 2322 start_tx->txvlan = 0; 2323 } 2324 2325 spin_lock_irqsave(&np->lock, flags); 2326 2327 if (np->tx_limit) { 2328 /* Limit the number of outstanding tx. Setup all fragments, but 2329 * do not set the VALID bit on the first descriptor. Save a pointer 2330 * to that descriptor and also for next skb_map element. 2331 */ 2332 2333 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { 2334 if (!np->tx_change_owner) 2335 np->tx_change_owner = start_tx_ctx; 2336 2337 /* remove VALID bit */ 2338 tx_flags &= ~NV_TX2_VALID; 2339 start_tx_ctx->first_tx_desc = start_tx; 2340 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; 2341 np->tx_end_flip = np->put_tx_ctx; 2342 } else { 2343 np->tx_pkts_in_progress++; 2344 } 2345 } 2346 2347 /* set tx flags */ 2348 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 2349 np->put_tx.ex = put_tx; 2350 2351 spin_unlock_irqrestore(&np->lock, flags); 2352 2353 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. 
tx_flags_extra: %x\n", 2354 dev->name, entries, tx_flags_extra); 2355 { 2356 int j; 2357 for (j=0; j<64; j++) { 2358 if ((j%16) == 0) 2359 dprintk("\n%03x:", j); 2360 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2361 } 2362 dprintk("\n"); 2363 } 2364 2365 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2366 return NETDEV_TX_OK; 2367} 2368 2369static inline void nv_tx_flip_ownership(struct net_device *dev) 2370{ 2371 struct fe_priv *np = netdev_priv(dev); 2372 2373 np->tx_pkts_in_progress--; 2374 if (np->tx_change_owner) { 2375 np->tx_change_owner->first_tx_desc->flaglen |= 2376 cpu_to_le32(NV_TX2_VALID); 2377 np->tx_pkts_in_progress++; 2378 2379 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; 2380 if (np->tx_change_owner == np->tx_end_flip) 2381 np->tx_change_owner = NULL; 2382 2383 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2384 } 2385} 2386 2387/* 2388 * nv_tx_done: check for completed packets, release the skbs. 2389 * 2390 * Caller must own np->lock. 2391 */ 2392static int nv_tx_done(struct net_device *dev, int limit) 2393{ 2394 struct fe_priv *np = netdev_priv(dev); 2395 u32 flags; 2396 int tx_work = 0; 2397 struct ring_desc* orig_get_tx = np->get_tx.orig; 2398 2399 while ((np->get_tx.orig != np->put_tx.orig) && 2400 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2401 (tx_work < limit)) { 2402 2403 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2404 dev->name, flags); 2405 2406 nv_unmap_txskb(np, np->get_tx_ctx); 2407 2408 if (np->desc_ver == DESC_VER_1) { 2409 if (flags & NV_TX_LASTPACKET) { 2410 if (flags & NV_TX_ERROR) { 2411 if (flags & NV_TX_UNDERFLOW) 2412 dev->stats.tx_fifo_errors++; 2413 if (flags & NV_TX_CARRIERLOST) 2414 dev->stats.tx_carrier_errors++; 2415 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2416 nv_legacybackoff_reseed(dev); 2417 dev->stats.tx_errors++; 2418 } else { 2419 dev->stats.tx_packets++; 2420 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2421 } 2422 dev_kfree_skb_any(np->get_tx_ctx->skb); 2423 np->get_tx_ctx->skb = NULL; 2424 tx_work++; 2425 } 2426 } else { 2427 if (flags & NV_TX2_LASTPACKET) { 2428 if (flags & NV_TX2_ERROR) { 2429 if (flags & NV_TX2_UNDERFLOW) 2430 dev->stats.tx_fifo_errors++; 2431 if (flags & NV_TX2_CARRIERLOST) 2432 dev->stats.tx_carrier_errors++; 2433 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2434 nv_legacybackoff_reseed(dev); 2435 dev->stats.tx_errors++; 2436 } else { 2437 dev->stats.tx_packets++; 2438 dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2439 } 2440 dev_kfree_skb_any(np->get_tx_ctx->skb); 2441 np->get_tx_ctx->skb = NULL; 2442 tx_work++; 2443 } 2444 } 2445 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) 2446 np->get_tx.orig = np->first_tx.orig; 2447 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2448 np->get_tx_ctx = np->first_tx_ctx; 2449 } 2450 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { 2451 np->tx_stop = 0; 2452 netif_wake_queue(dev); 2453 } 2454 return tx_work; 2455} 2456 2457static int nv_tx_done_optimized(struct net_device *dev, int limit) 2458{ 2459 struct fe_priv *np = netdev_priv(dev); 2460 u32 flags; 2461 int tx_work = 0; 2462 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2463 2464 while ((np->get_tx.ex != np->put_tx.ex) && 2465 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2466 (tx_work < limit)) { 2467 2468 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2469 dev->name, 
flags); 2470 2471 nv_unmap_txskb(np, np->get_tx_ctx); 2472 2473 if (flags & NV_TX2_LASTPACKET) { 2474 if (!(flags & NV_TX2_ERROR)) 2475 dev->stats.tx_packets++; 2476 else { 2477 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2478 if (np->driver_data & DEV_HAS_GEAR_MODE) 2479 nv_gear_backoff_reseed(dev); 2480 else 2481 nv_legacybackoff_reseed(dev); 2482 } 2483 } 2484 2485 dev_kfree_skb_any(np->get_tx_ctx->skb); 2486 np->get_tx_ctx->skb = NULL; 2487 tx_work++; 2488 2489 if (np->tx_limit) { 2490 nv_tx_flip_ownership(dev); 2491 } 2492 } 2493 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2494 np->get_tx.ex = np->first_tx.ex; 2495 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) 2496 np->get_tx_ctx = np->first_tx_ctx; 2497 } 2498 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { 2499 np->tx_stop = 0; 2500 netif_wake_queue(dev); 2501 } 2502 return tx_work; 2503} 2504 2505/* 2506 * nv_tx_timeout: dev->tx_timeout function 2507 * Called with netif_tx_lock held. 2508 */ 2509static void nv_tx_timeout(struct net_device *dev) 2510{ 2511 struct fe_priv *np = netdev_priv(dev); 2512 u8 __iomem *base = get_hwbase(dev); 2513 u32 status; 2514 union ring_type put_tx; 2515 int saved_tx_limit; 2516 2517 if (np->msi_flags & NV_MSI_X_ENABLED) 2518 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2519 else 2520 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2521 2522 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2523 2524 { 2525 int i; 2526 2527 printk(KERN_INFO "%s: Ring at %lx\n", 2528 dev->name, (unsigned long)np->ring_addr); 2529 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2530 for (i=0;i<=np->register_size;i+= 32) { 2531 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2532 i, 2533 readl(base + i + 0), readl(base + i + 4), 2534 readl(base + i + 8), readl(base + i + 12), 2535 readl(base + i + 16), readl(base + i + 20), 2536 readl(base + i + 24), readl(base + i + 28)); 2537 } 2538 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2539 for (i=0;i<np->tx_ring_size;i+= 4) { 2540 if (!nv_optimized(np)) { 2541 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2542 i, 2543 le32_to_cpu(np->tx_ring.orig[i].buf), 2544 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2545 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2546 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2547 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2548 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2549 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2550 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2551 } else { 2552 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2553 i, 2554 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2555 le32_to_cpu(np->tx_ring.ex[i].buflow), 2556 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2557 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2558 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2559 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2560 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2561 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2562 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2563 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2564 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2565 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); 2566 } 2567 } 2568 } 2569 2570 spin_lock_irq(&np->lock); 2571 2572 /* 1) stop tx engine */ 2573 nv_stop_tx(dev); 2574 2575 /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ 2576 saved_tx_limit = np->tx_limit; 2577 np->tx_limit = 0; 
/* prevent giving HW any limited pkts */ 2578 np->tx_stop = 0; /* prevent waking tx queue */ 2579 if (!nv_optimized(np)) 2580 nv_tx_done(dev, np->tx_ring_size); 2581 else 2582 nv_tx_done_optimized(dev, np->tx_ring_size); 2583 2584 /* save current HW position */ 2585 if (np->tx_change_owner) 2586 put_tx.ex = np->tx_change_owner->first_tx_desc; 2587 else 2588 put_tx = np->put_tx; 2589 2590 /* 3) clear all tx state */ 2591 nv_drain_tx(dev); 2592 nv_init_tx(dev); 2593 2594 /* 4) restore state to current HW position */ 2595 np->get_tx = np->put_tx = put_tx; 2596 np->tx_limit = saved_tx_limit; 2597 2598 /* 5) restart tx engine */ 2599 nv_start_tx(dev); 2600 netif_wake_queue(dev); 2601 spin_unlock_irq(&np->lock); 2602} 2603 2604/* 2605 * Called when the nic notices a mismatch between the actual data len on the 2606 * wire and the len indicated in the 802 header 2607 */ 2608static int nv_getlen(struct net_device *dev, void *packet, int datalen) 2609{ 2610 int hdrlen; /* length of the 802 header */ 2611 int protolen; /* length as stored in the proto field */ 2612 2613 /* 1) calculate len according to header */ 2614 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2615 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2616 hdrlen = VLAN_HLEN; 2617 } else { 2618 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2619 hdrlen = ETH_HLEN; 2620 } 2621 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2622 dev->name, datalen, protolen, hdrlen); 2623 if (protolen > ETH_DATA_LEN) 2624 return datalen; /* Value in proto field not a len, no checks possible */ 2625 2626 protolen += hdrlen; 2627 /* consistency checks: */ 2628 if (datalen > ETH_ZLEN) { 2629 if (datalen >= protolen) { 2630 /* more data on wire than in 802 header, trim off 2631 * the additional data. 2632 */ 2633 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2634 dev->name, protolen); 2635 return protolen; 2636 } else { 2637 /* less data on wire than mentioned in header. 2638 * Discard the packet. 2639 */ 2640 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", 2641 dev->name); 2642 return -1; 2643 } 2644 } else { 2645 /* short packet. Accept only if 802 values are also short */ 2646 if (protolen > ETH_ZLEN) { 2647 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", 2648 dev->name); 2649 return -1; 2650 } 2651 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", 2652 dev->name, datalen); 2653 return datalen; 2654 } 2655} 2656 2657static int nv_rx_process(struct net_device *dev, int limit) 2658{ 2659 struct fe_priv *np = netdev_priv(dev); 2660 u32 flags; 2661 int rx_work = 0; 2662 struct sk_buff *skb; 2663 int len; 2664 2665 while((np->get_rx.orig != np->put_rx.orig) && 2666 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2667 (rx_work < limit)) { 2668 2669 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", 2670 dev->name, flags); 2671 2672 /* 2673 * the packet is for us - immediately tear down the pci mapping. 2674 * TODO: check if a prefetch of the first cacheline improves 2675 * the performance. 
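 * (nv_rx_process_optimized below already prefetches skb->data once a packet is accepted.)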
2676 */ 2677 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2678 np->get_rx_ctx->dma_len, 2679 PCI_DMA_FROMDEVICE); 2680 skb = np->get_rx_ctx->skb; 2681 np->get_rx_ctx->skb = NULL; 2682 2683 { 2684 int j; 2685 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2686 for (j=0; j<64; j++) { 2687 if ((j%16) == 0) 2688 dprintk("\n%03x:", j); 2689 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2690 } 2691 dprintk("\n"); 2692 } 2693 /* look at what we actually got: */ 2694 if (np->desc_ver == DESC_VER_1) { 2695 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2696 len = flags & LEN_MASK_V1; 2697 if (unlikely(flags & NV_RX_ERROR)) { 2698 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { 2699 len = nv_getlen(dev, skb->data, len); 2700 if (len < 0) { 2701 dev->stats.rx_errors++; 2702 dev_kfree_skb(skb); 2703 goto next_pkt; 2704 } 2705 } 2706 /* framing errors are soft errors */ 2707 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2708 if (flags & NV_RX_SUBSTRACT1) { 2709 len--; 2710 } 2711 } 2712 /* the rest are hard errors */ 2713 else { 2714 if (flags & NV_RX_MISSEDFRAME) 2715 dev->stats.rx_missed_errors++; 2716 if (flags & NV_RX_CRCERR) 2717 dev->stats.rx_crc_errors++; 2718 if (flags & NV_RX_OVERFLOW) 2719 dev->stats.rx_over_errors++; 2720 dev->stats.rx_errors++; 2721 dev_kfree_skb(skb); 2722 goto next_pkt; 2723 } 2724 } 2725 } else { 2726 dev_kfree_skb(skb); 2727 goto next_pkt; 2728 } 2729 } else { 2730 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2731 len = flags & LEN_MASK_V2; 2732 if (unlikely(flags & NV_RX2_ERROR)) { 2733 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2734 len = nv_getlen(dev, skb->data, len); 2735 if (len < 0) { 2736 dev->stats.rx_errors++; 2737 dev_kfree_skb(skb); 2738 goto next_pkt; 2739 } 2740 } 2741 /* framing errors are soft errors */ 2742 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2743 if (flags & NV_RX2_SUBSTRACT1) { 2744 len--; 2745 } 2746 } 2747 /* the rest are hard errors */ 2748 else { 2749 if (flags & NV_RX2_CRCERR) 2750 dev->stats.rx_crc_errors++; 2751 if (flags & NV_RX2_OVERFLOW) 2752 dev->stats.rx_over_errors++; 2753 dev->stats.rx_errors++; 2754 dev_kfree_skb(skb); 2755 goto next_pkt; 2756 } 2757 } 2758 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2759 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2760 skb->ip_summed = CHECKSUM_UNNECESSARY; 2761 } else { 2762 dev_kfree_skb(skb); 2763 goto next_pkt; 2764 } 2765 } 2766 /* got a valid packet - forward it to the network core */ 2767 skb_put(skb, len); 2768 skb->protocol = eth_type_trans(skb, dev); 2769 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2770 dev->name, len, skb->protocol); 2771 napi_gro_receive(&np->napi, skb); 2772 dev->stats.rx_packets++; 2773 dev->stats.rx_bytes += len; 2774next_pkt: 2775 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2776 np->get_rx.orig = np->first_rx.orig; 2777 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2778 np->get_rx_ctx = np->first_rx_ctx; 2779 2780 rx_work++; 2781 } 2782 2783 return rx_work; 2784} 2785 2786static int nv_rx_process_optimized(struct net_device *dev, int limit) 2787{ 2788 struct fe_priv *np = netdev_priv(dev); 2789 u32 flags; 2790 u32 vlanflags = 0; 2791 int rx_work = 0; 2792 struct sk_buff *skb; 2793 int len; 2794 2795 while((np->get_rx.ex != np->put_rx.ex) && 2796 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2797 (rx_work < limit)) { 2798 2799 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: 
flags 0x%x.\n", 2800 dev->name, flags); 2801 2802 /* 2803 * the packet is for us - immediately tear down the pci mapping. 2804 * TODO: check if a prefetch of the first cacheline improves 2805 * the performance. 2806 */ 2807 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, 2808 np->get_rx_ctx->dma_len, 2809 PCI_DMA_FROMDEVICE); 2810 skb = np->get_rx_ctx->skb; 2811 np->get_rx_ctx->skb = NULL; 2812 2813 { 2814 int j; 2815 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2816 for (j=0; j<64; j++) { 2817 if ((j%16) == 0) 2818 dprintk("\n%03x:", j); 2819 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2820 } 2821 dprintk("\n"); 2822 } 2823 /* look at what we actually got: */ 2824 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2825 len = flags & LEN_MASK_V2; 2826 if (unlikely(flags & NV_RX2_ERROR)) { 2827 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2828 len = nv_getlen(dev, skb->data, len); 2829 if (len < 0) { 2830 dev_kfree_skb(skb); 2831 goto next_pkt; 2832 } 2833 } 2834 /* framing errors are soft errors */ 2835 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2836 if (flags & NV_RX2_SUBSTRACT1) { 2837 len--; 2838 } 2839 } 2840 /* the rest are hard errors */ 2841 else { 2842 dev_kfree_skb(skb); 2843 goto next_pkt; 2844 } 2845 } 2846 2847 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2848 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2849 skb->ip_summed = CHECKSUM_UNNECESSARY; 2850 2851 /* got a valid packet - forward it to the network core */ 2852 skb_put(skb, len); 2853 skb->protocol = eth_type_trans(skb, dev); 2854 prefetch(skb->data); 2855 2856 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n", 2857 dev->name, len, skb->protocol); 2858 2859 if (likely(!np->vlangrp)) { 2860 napi_gro_receive(&np->napi, skb); 2861 } else { 2862 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2863 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2864 vlan_gro_receive(&np->napi, np->vlangrp, 2865 vlanflags & NV_RX3_VLAN_TAG_MASK, skb); 2866 } else { 2867 napi_gro_receive(&np->napi, skb); 2868 } 2869 } 2870 2871 dev->stats.rx_packets++; 2872 dev->stats.rx_bytes += len; 2873 } else { 2874 dev_kfree_skb(skb); 2875 } 2876next_pkt: 2877 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) 2878 np->get_rx.ex = np->first_rx.ex; 2879 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) 2880 np->get_rx_ctx = np->first_rx_ctx; 2881 2882 rx_work++; 2883 } 2884 2885 return rx_work; 2886} 2887 2888static void set_bufsize(struct net_device *dev) 2889{ 2890 struct fe_priv *np = netdev_priv(dev); 2891 2892 if (dev->mtu <= ETH_DATA_LEN) 2893 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; 2894 else 2895 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; 2896} 2897 2898/* 2899 * nv_change_mtu: dev->change_mtu function 2900 * Called with dev_base_lock held for read. 
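 * Changing the buffer size requires stopping both engines, draining the rings and a full reinit; see the sequence below.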
2901 */ 2902static int nv_change_mtu(struct net_device *dev, int new_mtu) 2903{ 2904 struct fe_priv *np = netdev_priv(dev); 2905 int old_mtu; 2906 2907 if (new_mtu < 64 || new_mtu > np->pkt_limit) 2908 return -EINVAL; 2909 2910 old_mtu = dev->mtu; 2911 dev->mtu = new_mtu; 2912 2913 /* return early if the buffer sizes will not change */ 2914 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) 2915 return 0; 2916 if (old_mtu == new_mtu) 2917 return 0; 2918 2919 /* synchronized against open : rtnl_lock() held by caller */ 2920 if (netif_running(dev)) { 2921 u8 __iomem *base = get_hwbase(dev); 2922 /* 2923 * It seems that the nic preloads valid ring entries into an 2924 * internal buffer. The procedure for flushing everything is 2925 * guessed, there is probably a simpler approach. 2926 * Changing the MTU is a rare event, it shouldn't matter. 2927 */ 2928 nv_disable_irq(dev); 2929 nv_napi_disable(dev); 2930 netif_tx_lock_bh(dev); 2931 netif_addr_lock(dev); 2932 spin_lock(&np->lock); 2933 /* stop engines */ 2934 nv_stop_rxtx(dev); 2935 nv_txrx_reset(dev); 2936 /* drain rx queue */ 2937 nv_drain_rxtx(dev); 2938 /* reinit driver view of the rx queue */ 2939 set_bufsize(dev); 2940 if (nv_init_ring(dev)) { 2941 if (!np->in_shutdown) 2942 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2943 } 2944 /* reinit nic view of the rx queue */ 2945 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2946 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2947 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2948 base + NvRegRingSizes); 2949 pci_push(base); 2950 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2951 pci_push(base); 2952 2953 /* restart rx engine */ 2954 nv_start_rxtx(dev); 2955 spin_unlock(&np->lock); 2956 netif_addr_unlock(dev); 2957 netif_tx_unlock_bh(dev); 2958 nv_napi_enable(dev); 2959 nv_enable_irq(dev); 2960 } 2961 return 0; 2962} 2963 2964static void nv_copy_mac_to_hw(struct net_device *dev) 2965{ 2966 u8 __iomem *base = get_hwbase(dev); 2967 u32 mac[2]; 2968 2969 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 2970 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); 2971 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); 2972 2973 writel(mac[0], base + NvRegMacAddrA); 2974 writel(mac[1], base + NvRegMacAddrB); 2975} 2976 2977/* 2978 * nv_set_mac_address: dev->set_mac_address function 2979 * Called with rtnl_lock() held. 2980 */ 2981static int nv_set_mac_address(struct net_device *dev, void *addr) 2982{ 2983 struct fe_priv *np = netdev_priv(dev); 2984 struct sockaddr *macaddr = (struct sockaddr*)addr; 2985 2986 if (!is_valid_ether_addr(macaddr->sa_data)) 2987 return -EADDRNOTAVAIL; 2988 2989 /* synchronized against open : rtnl_lock() held by caller */ 2990 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); 2991 2992 if (netif_running(dev)) { 2993 netif_tx_lock_bh(dev); 2994 netif_addr_lock(dev); 2995 spin_lock_irq(&np->lock); 2996 2997 /* stop rx engine */ 2998 nv_stop_rx(dev); 2999 3000 /* set mac address */ 3001 nv_copy_mac_to_hw(dev); 3002 3003 /* restart rx engine */ 3004 nv_start_rx(dev); 3005 spin_unlock_irq(&np->lock); 3006 netif_addr_unlock(dev); 3007 netif_tx_unlock_bh(dev); 3008 } else { 3009 nv_copy_mac_to_hw(dev); 3010 } 3011 return 0; 3012} 3013 3014/* 3015 * nv_set_multicast: dev->set_multicast function 3016 * Called with netif_tx_lock held. 
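 * The hw filter is an (addr, mask) pair: mask marks the bits that have the same value in every requested multicast address, addr carries that common value.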
3017 */ 3018static void nv_set_multicast(struct net_device *dev) 3019{ 3020 struct fe_priv *np = netdev_priv(dev); 3021 u8 __iomem *base = get_hwbase(dev); 3022 u32 addr[2]; 3023 u32 mask[2]; 3024 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; 3025 3026 memset(addr, 0, sizeof(addr)); 3027 memset(mask, 0, sizeof(mask)); 3028 3029 if (dev->flags & IFF_PROMISC) { 3030 pff |= NVREG_PFF_PROMISC; 3031 } else { 3032 pff |= NVREG_PFF_MYADDR; 3033 3034 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { 3035 u32 alwaysOff[2]; 3036 u32 alwaysOn[2]; 3037 3038 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; 3039 if (dev->flags & IFF_ALLMULTI) { 3040 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 3041 } else { 3042 struct netdev_hw_addr *ha; 3043 3044 netdev_for_each_mc_addr(ha, dev) { 3045 unsigned char *addr = ha->addr; 3046 u32 a, b; 3047 3048 a = le32_to_cpu(*(__le32 *) addr); 3049 b = le16_to_cpu(*(__le16 *) (&addr[4])); 3050 alwaysOn[0] &= a; 3051 alwaysOff[0] &= ~a; 3052 alwaysOn[1] &= b; 3053 alwaysOff[1] &= ~b; 3054 } 3055 } 3056 addr[0] = alwaysOn[0]; 3057 addr[1] = alwaysOn[1]; 3058 mask[0] = alwaysOn[0] | alwaysOff[0]; 3059 mask[1] = alwaysOn[1] | alwaysOff[1]; 3060 } else { 3061 mask[0] = NVREG_MCASTMASKA_NONE; 3062 mask[1] = NVREG_MCASTMASKB_NONE; 3063 } 3064 } 3065 addr[0] |= NVREG_MCASTADDRA_FORCE; 3066 pff |= NVREG_PFF_ALWAYS; 3067 spin_lock_irq(&np->lock); 3068 nv_stop_rx(dev); 3069 writel(addr[0], base + NvRegMulticastAddrA); 3070 writel(addr[1], base + NvRegMulticastAddrB); 3071 writel(mask[0], base + NvRegMulticastMaskA); 3072 writel(mask[1], base + NvRegMulticastMaskB); 3073 writel(pff, base + NvRegPacketFilterFlags); 3074 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", 3075 dev->name); 3076 nv_start_rx(dev); 3077 spin_unlock_irq(&np->lock); 3078} 3079 3080static void nv_update_pause(struct net_device *dev, u32 pause_flags) 3081{ 3082 struct fe_priv *np = netdev_priv(dev); 3083 u8 __iomem *base = get_hwbase(dev); 3084 3085 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); 3086 3087 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { 3088 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; 3089 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { 3090 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); 3091 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3092 } else { 3093 writel(pff, base + NvRegPacketFilterFlags); 3094 } 3095 } 3096 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 3097 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 3098 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 3099 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3100 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3101 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3102 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { 3103 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3104 /* limit the number of tx pause frames to a default of 8 */ 3105 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); 3106 } 3107 writel(pause_enable, base + NvRegTxPauseFrame); 3108 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3109 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3110 } else { 3111 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 3112 writel(regmisc, base + NvRegMisc1); 3113 } 3114 } 3115} 3116 3117/** 3118 * nv_update_linkspeed: Setup the MAC according to the link partner 3119 * @dev: Network device to be 
configured 3120 * 3121 * The function queries the PHY and checks if there is a link partner. 3122 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is 3123 * set to 10 MBit HD. 3124 * 3125 * The function returns 0 if there is no link partner and 1 if there is 3126 * a good link partner. 3127 */ 3128static int nv_update_linkspeed(struct net_device *dev) 3129{ 3130 struct fe_priv *np = netdev_priv(dev); 3131 u8 __iomem *base = get_hwbase(dev); 3132 int adv = 0; 3133 int lpa = 0; 3134 int adv_lpa, adv_pause, lpa_pause; 3135 int newls = np->linkspeed; 3136 int newdup = np->duplex; 3137 int mii_status; 3138 int retval = 0; 3139 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 3140 u32 txrxFlags = 0; 3141 u32 phy_exp; 3142 3143 /* BMSR_LSTATUS is latched, read it twice: 3144 * we want the current value. 3145 */ 3146 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3147 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3148 3149 if (!(mii_status & BMSR_LSTATUS)) { 3150 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", 3151 dev->name); 3152 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3153 newdup = 0; 3154 retval = 0; 3155 goto set_speed; 3156 } 3157 3158 if (np->autoneg == 0) { 3159 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", 3160 dev->name, np->fixed_mode); 3161 if (np->fixed_mode & LPA_100FULL) { 3162 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3163 newdup = 1; 3164 } else if (np->fixed_mode & LPA_100HALF) { 3165 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3166 newdup = 0; 3167 } else if (np->fixed_mode & LPA_10FULL) { 3168 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3169 newdup = 1; 3170 } else { 3171 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3172 newdup = 0; 3173 } 3174 retval = 1; 3175 goto set_speed; 3176 } 3177 /* check auto negotiation is complete */ 3178 if (!(mii_status & BMSR_ANEGCOMPLETE)) { 3179 /* still in autonegotiation - configure nic for 10 MBit HD and wait. 
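The link change irq or the link timer will rerun this function once negotiation completes.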
*/ 3180 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3181 newdup = 0; 3182 retval = 0; 3183 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); 3184 goto set_speed; 3185 } 3186 3187 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3188 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3189 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", 3190 dev->name, adv, lpa); 3191 3192 retval = 1; 3193 if (np->gigabit == PHY_GIGABIT) { 3194 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 3195 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); 3196 3197 if ((control_1000 & ADVERTISE_1000FULL) && 3198 (status_1000 & LPA_1000FULL)) { 3199 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", 3200 dev->name); 3201 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3202 newdup = 1; 3203 goto set_speed; 3204 } 3205 } 3206 3207 adv_lpa = lpa & adv; 3208 if (adv_lpa & LPA_100FULL) { 3209 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3210 newdup = 1; 3211 } else if (adv_lpa & LPA_100HALF) { 3212 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3213 newdup = 0; 3214 } else if (adv_lpa & LPA_10FULL) { 3215 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3216 newdup = 1; 3217 } else if (adv_lpa & LPA_10HALF) { 3218 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3219 newdup = 0; 3220 } else { 3221 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); 3222 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3223 newdup = 0; 3224 } 3225 3226set_speed: 3227 if (np->duplex == newdup && np->linkspeed == newls) 3228 return retval; 3229 3230 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", 3231 dev->name, np->linkspeed, np->duplex, newls, newdup); 3232 3233 np->duplex = newdup; 3234 np->linkspeed = newls; 3235 3236 /* The transmitter and receiver must be restarted for safe update */ 3237 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { 3238 txrxFlags |= NV_RESTART_TX; 3239 nv_stop_tx(dev); 3240 } 3241 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 3242 txrxFlags |= NV_RESTART_RX; 3243 nv_stop_rx(dev); 3244 } 3245 3246 if (np->gigabit == PHY_GIGABIT) { 3247 phyreg = readl(base + NvRegSlotTime); 3248 phyreg &= ~(0x3FF00); 3249 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || 3250 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) 3251 phyreg |= NVREG_SLOTTIME_10_100_FULL; 3252 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) 3253 phyreg |= NVREG_SLOTTIME_1000_FULL; 3254 writel(phyreg, base + NvRegSlotTime); 3255 } 3256 3257 phyreg = readl(base + NvRegPhyInterface); 3258 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); 3259 if (np->duplex == 0) 3260 phyreg |= PHY_HALF; 3261 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) 3262 phyreg |= PHY_100; 3263 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3264 phyreg |= PHY_1000; 3265 writel(phyreg, base + NvRegPhyInterface); 3266 3267 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 3268 if (phyreg & PHY_RGMII) { 3269 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 3270 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 3271 } else { 3272 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 3273 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) 3274 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 3275 else 3276 
txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 3277 } else { 3278 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 3279 } 3280 } 3281 } else { 3282 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 3283 txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 3284 else 3285 txreg = NVREG_TX_DEFERRAL_DEFAULT; 3286 } 3287 writel(txreg, base + NvRegTxDeferral); 3288 3289 if (np->desc_ver == DESC_VER_1) { 3290 txreg = NVREG_TX_WM_DESC1_DEFAULT; 3291 } else { 3292 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 3293 txreg = NVREG_TX_WM_DESC2_3_1000; 3294 else 3295 txreg = NVREG_TX_WM_DESC2_3_DEFAULT; 3296 } 3297 writel(txreg, base + NvRegTxWatermark); 3298 3299 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3300 base + NvRegMisc1); 3301 pci_push(base); 3302 writel(np->linkspeed, base + NvRegLinkSpeed); 3303 pci_push(base); 3304 3305 pause_flags = 0; 3306 /* setup pause frame */ 3307 if (np->duplex != 0) { 3308 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3309 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3310 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3311 3312 switch (adv_pause) { 3313 case ADVERTISE_PAUSE_CAP: 3314 if (lpa_pause & LPA_PAUSE_CAP) { 3315 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3316 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3317 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3318 } 3319 break; 3320 case ADVERTISE_PAUSE_ASYM: 3321 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3322 { 3323 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3324 } 3325 break; 3326 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3327 if (lpa_pause & LPA_PAUSE_CAP) 3328 { 3329 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3330 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3331 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3332 } 3333 if (lpa_pause == LPA_PAUSE_ASYM) 3334 { 3335 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3336 } 3337 break; 3338 } 3339 } else { 3340 pause_flags = np->pause_flags; 3341 } 3342 } 3343 nv_update_pause(dev, pause_flags); 3344 3345 if (txrxFlags & NV_RESTART_TX) 3346 nv_start_tx(dev); 3347 if (txrxFlags & NV_RESTART_RX) 3348 nv_start_rx(dev); 3349 3350 return retval; 3351} 3352 3353static void nv_linkchange(struct net_device *dev) 3354{ 3355 if (nv_update_linkspeed(dev)) { 3356 if (!netif_carrier_ok(dev)) { 3357 netif_carrier_on(dev); 3358 printk(KERN_INFO "%s: link up.\n", dev->name); 3359 nv_txrx_gate(dev, false); 3360 nv_start_rx(dev); 3361 } 3362 } else { 3363 if (netif_carrier_ok(dev)) { 3364 netif_carrier_off(dev); 3365 printk(KERN_INFO "%s: link down.\n", dev->name); 3366 nv_txrx_gate(dev, true); 3367 nv_stop_rx(dev); 3368 } 3369 } 3370} 3371 3372static void nv_link_irq(struct net_device *dev) 3373{ 3374 u8 __iomem *base = get_hwbase(dev); 3375 u32 miistat; 3376 3377 miistat = readl(base + NvRegMIIStatus); 3378 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3379 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 3380 3381 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3382 nv_linkchange(dev); 3383 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); 3384} 3385 3386static void nv_msi_workaround(struct fe_priv *np) 3387{ 3388 3389 /* Need to toggle the msi irq mask within the ethernet device, 3390 * otherwise, future interrupts will not be detected. 
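 * Masking all vectors and then re-enabling vector 0 below performs that toggle.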
3391 */ 3392 if (np->msi_flags & NV_MSI_ENABLED) { 3393 u8 __iomem *base = np->base; 3394 3395 writel(0, base + NvRegMSIIrqMask); 3396 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3397 } 3398} 3399 3400static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) 3401{ 3402 struct fe_priv *np = netdev_priv(dev); 3403 3404 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { 3405 if (total_work > NV_DYNAMIC_THRESHOLD) { 3406 /* transition to poll based interrupts */ 3407 np->quiet_count = 0; 3408 if (np->irqmask != NVREG_IRQMASK_CPU) { 3409 np->irqmask = NVREG_IRQMASK_CPU; 3410 return 1; 3411 } 3412 } else { 3413 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { 3414 np->quiet_count++; 3415 } else { 3416 /* reached a period of low activity, switch 3417 to per tx/rx packet interrupts */ 3418 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { 3419 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3420 return 1; 3421 } 3422 } 3423 } 3424 } 3425 return 0; 3426} 3427 3428static irqreturn_t nv_nic_irq(int foo, void *data) 3429{ 3430 struct net_device *dev = (struct net_device *) data; 3431 struct fe_priv *np = netdev_priv(dev); 3432 u8 __iomem *base = get_hwbase(dev); 3433 3434 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3435 3436 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3437 np->events = readl(base + NvRegIrqStatus); 3438 writel(np->events, base + NvRegIrqStatus); 3439 } else { 3440 np->events = readl(base + NvRegMSIXIrqStatus); 3441 writel(np->events, base + NvRegMSIXIrqStatus); 3442 } 3443 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3444 if (!(np->events & np->irqmask)) 3445 return IRQ_NONE; 3446 3447 nv_msi_workaround(np); 3448 3449 if (napi_schedule_prep(&np->napi)) { 3450 /* 3451 * Disable further irq's (msix not enabled with napi) 3452 */ 3453 writel(0, base + NvRegIrqMask); 3454 __napi_schedule(&np->napi); 3455 } 3456 3457 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3458 3459 return IRQ_HANDLED; 3460} 3461 3462/** 3463 * All _optimized functions are used to help increase performance 3464 * (reduce CPU and increase throughput). They use descriptor version 3, 3465 * compiler directives, and reduce memory accesses. 
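 * nv_optimized() selects between these and the legacy paths at runtime.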
3466 */ 3467static irqreturn_t nv_nic_irq_optimized(int foo, void *data) 3468{ 3469 struct net_device *dev = (struct net_device *) data; 3470 struct fe_priv *np = netdev_priv(dev); 3471 u8 __iomem *base = get_hwbase(dev); 3472 3473 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3474 3475 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3476 np->events = readl(base + NvRegIrqStatus); 3477 writel(np->events, base + NvRegIrqStatus); 3478 } else { 3479 np->events = readl(base + NvRegMSIXIrqStatus); 3480 writel(np->events, base + NvRegMSIXIrqStatus); 3481 } 3482 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events); 3483 if (!(np->events & np->irqmask)) 3484 return IRQ_NONE; 3485 3486 nv_msi_workaround(np); 3487 3488 if (napi_schedule_prep(&np->napi)) { 3489 /* 3490 * Disable further irq's (msix not enabled with napi) 3491 */ 3492 writel(0, base + NvRegIrqMask); 3493 __napi_schedule(&np->napi); 3494 } 3495 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3496 3497 return IRQ_HANDLED; 3498} 3499 3500static irqreturn_t nv_nic_irq_tx(int foo, void *data) 3501{ 3502 struct net_device *dev = (struct net_device *) data; 3503 struct fe_priv *np = netdev_priv(dev); 3504 u8 __iomem *base = get_hwbase(dev); 3505 u32 events; 3506 int i; 3507 unsigned long flags; 3508 3509 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3510 3511 for (i=0; ; i++) { 3512 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3513 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3514 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3515 if (!(events & np->irqmask)) 3516 break; 3517 3518 spin_lock_irqsave(&np->lock, flags); 3519 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3520 spin_unlock_irqrestore(&np->lock, flags); 3521 3522 if (unlikely(i > max_interrupt_work)) { 3523 spin_lock_irqsave(&np->lock, flags); 3524 /* disable interrupts on the nic */ 3525 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3526 pci_push(base); 3527 3528 if (!np->in_shutdown) { 3529 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; 3530 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3531 } 3532 spin_unlock_irqrestore(&np->lock, flags); 3533 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3534 break; 3535 } 3536 3537 } 3538 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); 3539 3540 return IRQ_RETVAL(i); 3541} 3542 3543static int nv_napi_poll(struct napi_struct *napi, int budget) 3544{ 3545 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3546 struct net_device *dev = np->dev; 3547 u8 __iomem *base = get_hwbase(dev); 3548 unsigned long flags; 3549 int retcode; 3550 int rx_count, tx_work=0, rx_work=0; 3551 3552 do { 3553 if (!nv_optimized(np)) { 3554 spin_lock_irqsave(&np->lock, flags); 3555 tx_work += nv_tx_done(dev, np->tx_ring_size); 3556 spin_unlock_irqrestore(&np->lock, flags); 3557 3558 rx_count = nv_rx_process(dev, budget - rx_work); 3559 retcode = nv_alloc_rx(dev); 3560 } else { 3561 spin_lock_irqsave(&np->lock, flags); 3562 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); 3563 spin_unlock_irqrestore(&np->lock, flags); 3564 3565 rx_count = nv_rx_process_optimized(dev, 3566 budget - rx_work); 3567 retcode = nv_alloc_rx_optimized(dev); 3568 } 3569 } while (retcode == 0 && 3570 rx_count > 0 && (rx_work += rx_count) < budget); 3571 3572 if (retcode) { 3573 spin_lock_irqsave(&np->lock, flags); 3574 if (!np->in_shutdown) 3575 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3576 spin_unlock_irqrestore(&np->lock, flags); 3577 } 
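 /* rx refill failed (OOM): the oom_kick timer simply reschedules NAPI to retry */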
3578 3579 nv_change_interrupt_mode(dev, tx_work + rx_work); 3580 3581 if (unlikely(np->events & NVREG_IRQ_LINK)) { 3582 spin_lock_irqsave(&np->lock, flags); 3583 nv_link_irq(dev); 3584 spin_unlock_irqrestore(&np->lock, flags); 3585 } 3586 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { 3587 spin_lock_irqsave(&np->lock, flags); 3588 nv_linkchange(dev); 3589 spin_unlock_irqrestore(&np->lock, flags); 3590 np->link_timeout = jiffies + LINK_TIMEOUT; 3591 } 3592 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { 3593 spin_lock_irqsave(&np->lock, flags); 3594 if (!np->in_shutdown) { 3595 np->nic_poll_irq = np->irqmask; 3596 np->recover_error = 1; 3597 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3598 } 3599 spin_unlock_irqrestore(&np->lock, flags); 3600 napi_complete(napi); 3601 return rx_work; 3602 } 3603 3604 if (rx_work < budget) { 3605 /* re-enable interrupts 3606 (msix not enabled in napi) */ 3607 napi_complete(napi); 3608 3609 writel(np->irqmask, base + NvRegIrqMask); 3610 } 3611 return rx_work; 3612} 3613 3614static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3615{ 3616 struct net_device *dev = (struct net_device *) data; 3617 struct fe_priv *np = netdev_priv(dev); 3618 u8 __iomem *base = get_hwbase(dev); 3619 u32 events; 3620 int i; 3621 unsigned long flags; 3622 3623 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3624 3625 for (i=0; ; i++) { 3626 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3627 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3628 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3629 if (!(events & np->irqmask)) 3630 break; 3631 3632 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { 3633 if (unlikely(nv_alloc_rx_optimized(dev))) { 3634 spin_lock_irqsave(&np->lock, flags); 3635 if (!np->in_shutdown) 3636 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3637 spin_unlock_irqrestore(&np->lock, flags); 3638 } 3639 } 3640 3641 if (unlikely(i > max_interrupt_work)) { 3642 spin_lock_irqsave(&np->lock, flags); 3643 /* disable interrupts on the nic */ 3644 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3645 pci_push(base); 3646 3647 if (!np->in_shutdown) { 3648 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; 3649 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3650 } 3651 spin_unlock_irqrestore(&np->lock, flags); 3652 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3653 break; 3654 } 3655 } 3656 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 3657 3658 return IRQ_RETVAL(i); 3659} 3660 3661static irqreturn_t nv_nic_irq_other(int foo, void *data) 3662{ 3663 struct net_device *dev = (struct net_device *) data; 3664 struct fe_priv *np = netdev_priv(dev); 3665 u8 __iomem *base = get_hwbase(dev); 3666 u32 events; 3667 int i; 3668 unsigned long flags; 3669 3670 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3671 3672 for (i=0; ; i++) { 3673 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3674 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3675 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3676 if (!(events & np->irqmask)) 3677 break; 3678 3679 /* check tx in case we reached max loop limit in tx isr */ 3680 spin_lock_irqsave(&np->lock, flags); 3681 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); 3682 spin_unlock_irqrestore(&np->lock, flags); 3683 3684 if (events & NVREG_IRQ_LINK) { 3685 spin_lock_irqsave(&np->lock, flags); 3686 nv_link_irq(dev); 3687 spin_unlock_irqrestore(&np->lock, flags); 3688 } 3689 if (np->need_linktimer && 
time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			/* use the irqsave variant like the other paths in
			 * this handler; a plain spin_unlock_irq() would
			 * unconditionally re-enable irqs inside the ISR */
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}

static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}

static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
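	 * Worked example (illustrative): mapping vector 2 onto the three
	 * rx interrupt bits (irqmask 0x07) sets the low three nibbles,
	 * msixmap = (2 << 0) | (2 << 4) | (2 << 8) = 0x222; the
	 * read-modify-write below preserves nibbles that other vectors
	 * have already claimed.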
3771 */ 3772 for (i = 0; i < 8; i++) { 3773 if ((irqmask >> i) & 0x1) { 3774 msixmap |= vector << (i << 2); 3775 } 3776 } 3777 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3778 3779 msixmap = 0; 3780 for (i = 0; i < 8; i++) { 3781 if ((irqmask >> (i + 8)) & 0x1) { 3782 msixmap |= vector << (i << 2); 3783 } 3784 } 3785 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3786} 3787 3788static int nv_request_irq(struct net_device *dev, int intr_test) 3789{ 3790 struct fe_priv *np = get_nvpriv(dev); 3791 u8 __iomem *base = get_hwbase(dev); 3792 int ret = 1; 3793 int i; 3794 irqreturn_t (*handler)(int foo, void *data); 3795 3796 if (intr_test) { 3797 handler = nv_nic_irq_test; 3798 } else { 3799 if (nv_optimized(np)) 3800 handler = nv_nic_irq_optimized; 3801 else 3802 handler = nv_nic_irq; 3803 } 3804 3805 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3806 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3807 np->msi_x_entry[i].entry = i; 3808 } 3809 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3810 np->msi_flags |= NV_MSI_X_ENABLED; 3811 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3812 /* Request irq for rx handling */ 3813 sprintf(np->name_rx, "%s-rx", dev->name); 3814 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3815 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3816 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3817 pci_disable_msix(np->pci_dev); 3818 np->msi_flags &= ~NV_MSI_X_ENABLED; 3819 goto out_err; 3820 } 3821 /* Request irq for tx handling */ 3822 sprintf(np->name_tx, "%s-tx", dev->name); 3823 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3824 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3825 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3826 pci_disable_msix(np->pci_dev); 3827 np->msi_flags &= ~NV_MSI_X_ENABLED; 3828 goto out_free_rx; 3829 } 3830 /* Request irq for link and timer handling */ 3831 sprintf(np->name_other, "%s-other", dev->name); 3832 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3833 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3834 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3835 pci_disable_msix(np->pci_dev); 3836 np->msi_flags &= ~NV_MSI_X_ENABLED; 3837 goto out_free_tx; 3838 } 3839 /* map interrupts to their respective vector */ 3840 writel(0, base + NvRegMSIXMap0); 3841 writel(0, base + NvRegMSIXMap1); 3842 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); 3843 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); 3844 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3845 } else { 3846 /* Request irq for all interrupts */ 3847 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3848 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3849 pci_disable_msix(np->pci_dev); 3850 np->msi_flags &= ~NV_MSI_X_ENABLED; 3851 goto out_err; 3852 } 3853 3854 /* map interrupts to vector 0 */ 3855 writel(0, base + NvRegMSIXMap0); 3856 writel(0, base + NvRegMSIXMap1); 3857 } 3858 } 3859 } 3860 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3861 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3862 np->msi_flags |= NV_MSI_ENABLED; 3863 dev->irq = np->pci_dev->irq; 3864 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3865 printk(KERN_INFO "forcedeth: 
request_irq failed %d\n", ret); 3866 pci_disable_msi(np->pci_dev); 3867 np->msi_flags &= ~NV_MSI_ENABLED; 3868 dev->irq = np->pci_dev->irq; 3869 goto out_err; 3870 } 3871 3872 /* map interrupts to vector 0 */ 3873 writel(0, base + NvRegMSIMap0); 3874 writel(0, base + NvRegMSIMap1); 3875 /* enable msi vector 0 */ 3876 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 3877 } 3878 } 3879 if (ret != 0) { 3880 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) 3881 goto out_err; 3882 3883 } 3884 3885 return 0; 3886out_free_tx: 3887 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); 3888out_free_rx: 3889 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); 3890out_err: 3891 return 1; 3892} 3893 3894static void nv_free_irq(struct net_device *dev) 3895{ 3896 struct fe_priv *np = get_nvpriv(dev); 3897 int i; 3898 3899 if (np->msi_flags & NV_MSI_X_ENABLED) { 3900 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3901 free_irq(np->msi_x_entry[i].vector, dev); 3902 } 3903 pci_disable_msix(np->pci_dev); 3904 np->msi_flags &= ~NV_MSI_X_ENABLED; 3905 } else { 3906 free_irq(np->pci_dev->irq, dev); 3907 if (np->msi_flags & NV_MSI_ENABLED) { 3908 pci_disable_msi(np->pci_dev); 3909 np->msi_flags &= ~NV_MSI_ENABLED; 3910 } 3911 } 3912} 3913 3914static void nv_do_nic_poll(unsigned long data) 3915{ 3916 struct net_device *dev = (struct net_device *) data; 3917 struct fe_priv *np = netdev_priv(dev); 3918 u8 __iomem *base = get_hwbase(dev); 3919 u32 mask = 0; 3920 3921 /* 3922 * First disable irq(s) and then 3923 * reenable interrupts on the nic, we have to do this before calling 3924 * nv_nic_irq because that may decide to do otherwise 3925 */ 3926 3927 if (!using_multi_irqs(dev)) { 3928 if (np->msi_flags & NV_MSI_X_ENABLED) 3929 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 3930 else 3931 disable_irq_lockdep(np->pci_dev->irq); 3932 mask = np->irqmask; 3933 } else { 3934 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 3935 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 3936 mask |= NVREG_IRQ_RX_ALL; 3937 } 3938 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 3939 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 3940 mask |= NVREG_IRQ_TX_ALL; 3941 } 3942 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 3943 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 3944 mask |= NVREG_IRQ_OTHER; 3945 } 3946 } 3947 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 3948 3949 if (np->recover_error) { 3950 np->recover_error = 0; 3951 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name); 3952 if (netif_running(dev)) { 3953 netif_tx_lock_bh(dev); 3954 netif_addr_lock(dev); 3955 spin_lock(&np->lock); 3956 /* stop engines */ 3957 nv_stop_rxtx(dev); 3958 if (np->driver_data & DEV_HAS_POWER_CNTRL) 3959 nv_mac_reset(dev); 3960 nv_txrx_reset(dev); 3961 /* drain rx queue */ 3962 nv_drain_rxtx(dev); 3963 /* reinit driver view of the rx queue */ 3964 set_bufsize(dev); 3965 if (nv_init_ring(dev)) { 3966 if (!np->in_shutdown) 3967 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3968 } 3969 /* reinit nic view of the rx queue */ 3970 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3971 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3972 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3973 base + NvRegRingSizes); 3974 pci_push(base); 3975 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + 
NvRegTxRxControl); 3976 pci_push(base); 3977 /* clear interrupts */ 3978 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3979 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 3980 else 3981 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 3982 3983 /* restart rx engine */ 3984 nv_start_rxtx(dev); 3985 spin_unlock(&np->lock); 3986 netif_addr_unlock(dev); 3987 netif_tx_unlock_bh(dev); 3988 } 3989 } 3990 3991 writel(mask, base + NvRegIrqMask); 3992 pci_push(base); 3993 3994 if (!using_multi_irqs(dev)) { 3995 np->nic_poll_irq = 0; 3996 if (nv_optimized(np)) 3997 nv_nic_irq_optimized(0, dev); 3998 else 3999 nv_nic_irq(0, dev); 4000 if (np->msi_flags & NV_MSI_X_ENABLED) 4001 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4002 else 4003 enable_irq_lockdep(np->pci_dev->irq); 4004 } else { 4005 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4006 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4007 nv_nic_irq_rx(0, dev); 4008 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4009 } 4010 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4011 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4012 nv_nic_irq_tx(0, dev); 4013 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4014 } 4015 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4016 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4017 nv_nic_irq_other(0, dev); 4018 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4019 } 4020 } 4021 4022} 4023 4024#ifdef CONFIG_NET_POLL_CONTROLLER 4025static void nv_poll_controller(struct net_device *dev) 4026{ 4027 nv_do_nic_poll((unsigned long) dev); 4028} 4029#endif 4030 4031static void nv_do_stats_poll(unsigned long data) 4032{ 4033 struct net_device *dev = (struct net_device *) data; 4034 struct fe_priv *np = netdev_priv(dev); 4035 4036 nv_get_hw_stats(dev); 4037 4038 if (!np->in_shutdown) 4039 mod_timer(&np->stats_poll, 4040 round_jiffies(jiffies + STATS_INTERVAL)); 4041} 4042 4043static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4044{ 4045 struct fe_priv *np = netdev_priv(dev); 4046 strcpy(info->driver, DRV_NAME); 4047 strcpy(info->version, FORCEDETH_VERSION); 4048 strcpy(info->bus_info, pci_name(np->pci_dev)); 4049} 4050 4051static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4052{ 4053 struct fe_priv *np = netdev_priv(dev); 4054 wolinfo->supported = WAKE_MAGIC; 4055 4056 spin_lock_irq(&np->lock); 4057 if (np->wolenabled) 4058 wolinfo->wolopts = WAKE_MAGIC; 4059 spin_unlock_irq(&np->lock); 4060} 4061 4062static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 4063{ 4064 struct fe_priv *np = netdev_priv(dev); 4065 u8 __iomem *base = get_hwbase(dev); 4066 u32 flags = 0; 4067 4068 if (wolinfo->wolopts == 0) { 4069 np->wolenabled = 0; 4070 } else if (wolinfo->wolopts & WAKE_MAGIC) { 4071 np->wolenabled = 1; 4072 flags = NVREG_WAKEUPFLAGS_ENABLE; 4073 } 4074 if (netif_running(dev)) { 4075 spin_lock_irq(&np->lock); 4076 writel(flags, base + NvRegWakeUpFlags); 4077 spin_unlock_irq(&np->lock); 4078 } 4079 return 0; 4080} 4081 4082static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4083{ 4084 struct fe_priv *np = netdev_priv(dev); 4085 int adv; 4086 4087 spin_lock_irq(&np->lock); 4088 ecmd->port = PORT_MII; 4089 if (!netif_running(dev)) { 4090 /* We do not track link speed / duplex setting if the 4091 * interface is disabled. 
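	 * The link may have changed while the interface was down, so
	 * the cached values can be stale.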
Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & NVREG_LINKSPEED_MASK) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disabled; forcing speed 1000 is
		 * intentionally forbidden - no one should need that.
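		 * (1000BASE-T needs autonegotiation for master/slave
		 * resolution per IEEE 802.3 clause 40, so a forced
		 * gigabit link would not come up anyway.)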
		 */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE,
adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}

#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* the buffer is register_size bytes, i.e. register_size/4 words;
	 * iterating with <= would write one u32 past the end of it */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ?
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; 4403 4404 ring->rx_pending = np->rx_ring_size; 4405 ring->rx_mini_pending = 0; 4406 ring->rx_jumbo_pending = 0; 4407 ring->tx_pending = np->tx_ring_size; 4408} 4409 4410static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4411{ 4412 struct fe_priv *np = netdev_priv(dev); 4413 u8 __iomem *base = get_hwbase(dev); 4414 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; 4415 dma_addr_t ring_addr; 4416 4417 if (ring->rx_pending < RX_RING_MIN || 4418 ring->tx_pending < TX_RING_MIN || 4419 ring->rx_mini_pending != 0 || 4420 ring->rx_jumbo_pending != 0 || 4421 (np->desc_ver == DESC_VER_1 && 4422 (ring->rx_pending > RING_MAX_DESC_VER_1 || 4423 ring->tx_pending > RING_MAX_DESC_VER_1)) || 4424 (np->desc_ver != DESC_VER_1 && 4425 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || 4426 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { 4427 return -EINVAL; 4428 } 4429 4430 /* allocate new rings */ 4431 if (!nv_optimized(np)) { 4432 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4433 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4434 &ring_addr); 4435 } else { 4436 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4437 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4438 &ring_addr); 4439 } 4440 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); 4441 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4442 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4443 /* fall back to old rings */ 4444 if (!nv_optimized(np)) { 4445 if (rxtx_ring) 4446 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4447 rxtx_ring, ring_addr); 4448 } else { 4449 if (rxtx_ring) 4450 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4451 rxtx_ring, ring_addr); 4452 } 4453 if (rx_skbuff) 4454 kfree(rx_skbuff); 4455 if (tx_skbuff) 4456 kfree(tx_skbuff); 4457 goto exit; 4458 } 4459 4460 if (netif_running(dev)) { 4461 nv_disable_irq(dev); 4462 nv_napi_disable(dev); 4463 netif_tx_lock_bh(dev); 4464 netif_addr_lock(dev); 4465 spin_lock(&np->lock); 4466 /* stop engines */ 4467 nv_stop_rxtx(dev); 4468 nv_txrx_reset(dev); 4469 /* drain queues */ 4470 nv_drain_rxtx(dev); 4471 /* delete queues */ 4472 free_rings(dev); 4473 } 4474 4475 /* set new values */ 4476 np->rx_ring_size = ring->rx_pending; 4477 np->tx_ring_size = ring->tx_pending; 4478 4479 if (!nv_optimized(np)) { 4480 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4481 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4482 } else { 4483 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4484 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4485 } 4486 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4487 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4488 np->ring_addr = ring_addr; 4489 4490 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4491 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); 4492 4493 if (netif_running(dev)) { 4494 /* reinit driver view of the queues */ 4495 set_bufsize(dev); 4496 if (nv_init_ring(dev)) { 4497 if (!np->in_shutdown) 4498 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4499 } 4500 4501 /* reinit nic view of the queues */ 4502 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4503 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4504 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4505 base + 
NvRegRingSizes); 4506 pci_push(base); 4507 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4508 pci_push(base); 4509 4510 /* restart engines */ 4511 nv_start_rxtx(dev); 4512 spin_unlock(&np->lock); 4513 netif_addr_unlock(dev); 4514 netif_tx_unlock_bh(dev); 4515 nv_napi_enable(dev); 4516 nv_enable_irq(dev); 4517 } 4518 return 0; 4519exit: 4520 return -ENOMEM; 4521} 4522 4523static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4524{ 4525 struct fe_priv *np = netdev_priv(dev); 4526 4527 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; 4528 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; 4529 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; 4530} 4531 4532static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) 4533{ 4534 struct fe_priv *np = netdev_priv(dev); 4535 int adv, bmcr; 4536 4537 if ((!np->autoneg && np->duplex == 0) || 4538 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4539 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4540 dev->name); 4541 return -EINVAL; 4542 } 4543 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4544 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4545 return -EINVAL; 4546 } 4547 4548 netif_carrier_off(dev); 4549 if (netif_running(dev)) { 4550 nv_disable_irq(dev); 4551 netif_tx_lock_bh(dev); 4552 netif_addr_lock(dev); 4553 spin_lock(&np->lock); 4554 /* stop engines */ 4555 nv_stop_rxtx(dev); 4556 spin_unlock(&np->lock); 4557 netif_addr_unlock(dev); 4558 netif_tx_unlock_bh(dev); 4559 } 4560 4561 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); 4562 if (pause->rx_pause) 4563 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; 4564 if (pause->tx_pause) 4565 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; 4566 4567 if (np->autoneg && pause->autoneg) { 4568 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; 4569 4570 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4571 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4572 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */ 4573 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 4574 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 4575 adv |= ADVERTISE_PAUSE_ASYM; 4576 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4577 4578 if (netif_running(dev)) 4579 printk(KERN_INFO "%s: link down.\n", dev->name); 4580 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4581 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4582 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4583 } else { 4584 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4585 if (pause->rx_pause) 4586 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 4587 if (pause->tx_pause) 4588 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 4589 4590 if (!netif_running(dev)) 4591 nv_update_linkspeed(dev); 4592 else 4593 nv_update_pause(dev, np->pause_flags); 4594 } 4595 4596 if (netif_running(dev)) { 4597 nv_start_rxtx(dev); 4598 nv_enable_irq(dev); 4599 } 4600 return 0; 4601} 4602 4603static u32 nv_get_rx_csum(struct net_device *dev) 4604{ 4605 struct fe_priv *np = netdev_priv(dev); 4606 return (np->rx_csum) != 0; 4607} 4608 4609static int nv_set_rx_csum(struct net_device *dev, u32 data) 4610{ 4611 struct fe_priv *np = netdev_priv(dev); 4612 u8 __iomem *base = get_hwbase(dev); 4613 int retcode = 0; 4614 4615 if (np->driver_data & 
DEV_HAS_CHECKSUM) { 4616 if (data) { 4617 np->rx_csum = 1; 4618 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4619 } else { 4620 np->rx_csum = 0; 4621 /* vlan is dependent on rx checksum offload */ 4622 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) 4623 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; 4624 } 4625 if (netif_running(dev)) { 4626 spin_lock_irq(&np->lock); 4627 writel(np->txrxctl_bits, base + NvRegTxRxControl); 4628 spin_unlock_irq(&np->lock); 4629 } 4630 } else { 4631 return -EINVAL; 4632 } 4633 4634 return retcode; 4635} 4636 4637static int nv_set_tx_csum(struct net_device *dev, u32 data) 4638{ 4639 struct fe_priv *np = netdev_priv(dev); 4640 4641 if (np->driver_data & DEV_HAS_CHECKSUM) 4642 return ethtool_op_set_tx_csum(dev, data); 4643 else 4644 return -EOPNOTSUPP; 4645} 4646 4647static int nv_set_sg(struct net_device *dev, u32 data) 4648{ 4649 struct fe_priv *np = netdev_priv(dev); 4650 4651 if (np->driver_data & DEV_HAS_CHECKSUM) 4652 return ethtool_op_set_sg(dev, data); 4653 else 4654 return -EOPNOTSUPP; 4655} 4656 4657static int nv_get_sset_count(struct net_device *dev, int sset) 4658{ 4659 struct fe_priv *np = netdev_priv(dev); 4660 4661 switch (sset) { 4662 case ETH_SS_TEST: 4663 if (np->driver_data & DEV_HAS_TEST_EXTENDED) 4664 return NV_TEST_COUNT_EXTENDED; 4665 else 4666 return NV_TEST_COUNT_BASE; 4667 case ETH_SS_STATS: 4668 if (np->driver_data & DEV_HAS_STATISTICS_V3) 4669 return NV_DEV_STATISTICS_V3_COUNT; 4670 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4671 return NV_DEV_STATISTICS_V2_COUNT; 4672 else if (np->driver_data & DEV_HAS_STATISTICS_V1) 4673 return NV_DEV_STATISTICS_V1_COUNT; 4674 else 4675 return 0; 4676 default: 4677 return -EOPNOTSUPP; 4678 } 4679} 4680 4681static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) 4682{ 4683 struct fe_priv *np = netdev_priv(dev); 4684 4685 /* update stats */ 4686 nv_do_stats_poll((unsigned long)dev); 4687 4688 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4689} 4690 4691static int nv_link_test(struct net_device *dev) 4692{ 4693 struct fe_priv *np = netdev_priv(dev); 4694 int mii_status; 4695 4696 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4697 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 4698 4699 /* check phy link status */ 4700 if (!(mii_status & BMSR_LSTATUS)) 4701 return 0; 4702 else 4703 return 1; 4704} 4705 4706static int nv_register_test(struct net_device *dev) 4707{ 4708 u8 __iomem *base = get_hwbase(dev); 4709 int i = 0; 4710 u32 orig_read, new_read; 4711 4712 do { 4713 orig_read = readl(base + nv_registers_test[i].reg); 4714 4715 /* xor with mask to toggle bits */ 4716 orig_read ^= nv_registers_test[i].mask; 4717 4718 writel(orig_read, base + nv_registers_test[i].reg); 4719 4720 new_read = readl(base + nv_registers_test[i].reg); 4721 4722 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) 4723 return 0; 4724 4725 /* restore original value */ 4726 orig_read ^= nv_registers_test[i].mask; 4727 writel(orig_read, base + nv_registers_test[i].reg); 4728 4729 } while (nv_registers_test[++i].reg != 0); 4730 4731 return 1; 4732} 4733 4734static int nv_interrupt_test(struct net_device *dev) 4735{ 4736 struct fe_priv *np = netdev_priv(dev); 4737 u8 __iomem *base = get_hwbase(dev); 4738 int ret = 1; 4739 int testcnt; 4740 u32 save_msi_flags, save_poll_interval = 0; 4741 4742 if (netif_running(dev)) { 4743 /* free current irq */ 4744 nv_free_irq(dev); 4745 save_poll_interval = 
readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}

static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	/* map the buffer only after filling it; the nic only ever reads
	 * it, so map it to-device, with the same length and direction
	 * that the unmap further down uses */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_end_pointer(tx_skb) - tx_skb->data,
				       PCI_DMA_TODEVICE);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen =
cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4860 } 4861 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4862 pci_push(get_hwbase(dev)); 4863 4864 msleep(500); 4865 4866 /* check for rx of the packet */ 4867 if (!nv_optimized(np)) { 4868 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4869 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4870 4871 } else { 4872 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); 4873 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 4874 } 4875 4876 if (flags & NV_RX_AVAIL) { 4877 ret = 0; 4878 } else if (np->desc_ver == DESC_VER_1) { 4879 if (flags & NV_RX_ERROR) 4880 ret = 0; 4881 } else { 4882 if (flags & NV_RX2_ERROR) { 4883 ret = 0; 4884 } 4885 } 4886 4887 if (ret) { 4888 if (len != pkt_len) { 4889 ret = 0; 4890 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4891 dev->name, len, pkt_len); 4892 } else { 4893 rx_skb = np->rx_skb[0].skb; 4894 for (i = 0; i < pkt_len; i++) { 4895 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4896 ret = 0; 4897 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", 4898 dev->name, i); 4899 break; 4900 } 4901 } 4902 } 4903 } else { 4904 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 4905 } 4906 4907 pci_unmap_single(np->pci_dev, test_dma_addr, 4908 (skb_end_pointer(tx_skb) - tx_skb->data), 4909 PCI_DMA_TODEVICE); 4910 dev_kfree_skb_any(tx_skb); 4911 out: 4912 /* stop engines */ 4913 nv_stop_rxtx(dev); 4914 nv_txrx_reset(dev); 4915 /* drain rx queue */ 4916 nv_drain_rxtx(dev); 4917 4918 if (netif_running(dev)) { 4919 writel(misc1_flags, base + NvRegMisc1); 4920 writel(filter_flags, base + NvRegPacketFilterFlags); 4921 nv_enable_irq(dev); 4922 } 4923 4924 return ret; 4925} 4926 4927static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) 4928{ 4929 struct fe_priv *np = netdev_priv(dev); 4930 u8 __iomem *base = get_hwbase(dev); 4931 int result; 4932 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); 4933 4934 if (!nv_link_test(dev)) { 4935 test->flags |= ETH_TEST_FL_FAILED; 4936 buffer[0] = 1; 4937 } 4938 4939 if (test->flags & ETH_TEST_FL_OFFLINE) { 4940 if (netif_running(dev)) { 4941 netif_stop_queue(dev); 4942 nv_napi_disable(dev); 4943 netif_tx_lock_bh(dev); 4944 netif_addr_lock(dev); 4945 spin_lock_irq(&np->lock); 4946 nv_disable_hw_interrupts(dev, np->irqmask); 4947 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4948 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4949 } else { 4950 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4951 } 4952 /* stop engines */ 4953 nv_stop_rxtx(dev); 4954 nv_txrx_reset(dev); 4955 /* drain rx queue */ 4956 nv_drain_rxtx(dev); 4957 spin_unlock_irq(&np->lock); 4958 netif_addr_unlock(dev); 4959 netif_tx_unlock_bh(dev); 4960 } 4961 4962 if (!nv_register_test(dev)) { 4963 test->flags |= ETH_TEST_FL_FAILED; 4964 buffer[1] = 1; 4965 } 4966 4967 result = nv_interrupt_test(dev); 4968 if (result != 1) { 4969 test->flags |= ETH_TEST_FL_FAILED; 4970 buffer[2] = 1; 4971 } 4972 if (result == 0) { 4973 /* bail out */ 4974 return; 4975 } 4976 4977 if (!nv_loopback_test(dev)) { 4978 test->flags |= ETH_TEST_FL_FAILED; 4979 buffer[3] = 1; 4980 } 4981 4982 if (netif_running(dev)) { 4983 /* reinit driver view of the rx queue */ 4984 set_bufsize(dev); 4985 if (nv_init_ring(dev)) { 4986 if (!np->in_shutdown) 4987 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4988 } 4989 /* reinit nic view of the rx queue */ 4990 writel(np->rx_buf_sz, 
base + NvRegOffloadConfig); 4991 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4992 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4993 base + NvRegRingSizes); 4994 pci_push(base); 4995 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4996 pci_push(base); 4997 /* restart rx engine */ 4998 nv_start_rxtx(dev); 4999 netif_start_queue(dev); 5000 nv_napi_enable(dev); 5001 nv_enable_hw_interrupts(dev, np->irqmask); 5002 } 5003 } 5004} 5005 5006static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) 5007{ 5008 switch (stringset) { 5009 case ETH_SS_STATS: 5010 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); 5011 break; 5012 case ETH_SS_TEST: 5013 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); 5014 break; 5015 } 5016} 5017 5018static const struct ethtool_ops ops = { 5019 .get_drvinfo = nv_get_drvinfo, 5020 .get_link = ethtool_op_get_link, 5021 .get_wol = nv_get_wol, 5022 .set_wol = nv_set_wol, 5023 .get_settings = nv_get_settings, 5024 .set_settings = nv_set_settings, 5025 .get_regs_len = nv_get_regs_len, 5026 .get_regs = nv_get_regs, 5027 .nway_reset = nv_nway_reset, 5028 .set_tso = nv_set_tso, 5029 .get_ringparam = nv_get_ringparam, 5030 .set_ringparam = nv_set_ringparam, 5031 .get_pauseparam = nv_get_pauseparam, 5032 .set_pauseparam = nv_set_pauseparam, 5033 .get_rx_csum = nv_get_rx_csum, 5034 .set_rx_csum = nv_set_rx_csum, 5035 .set_tx_csum = nv_set_tx_csum, 5036 .set_sg = nv_set_sg, 5037 .get_strings = nv_get_strings, 5038 .get_ethtool_stats = nv_get_ethtool_stats, 5039 .get_sset_count = nv_get_sset_count, 5040 .self_test = nv_self_test, 5041}; 5042 5043static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 5044{ 5045 struct fe_priv *np = get_nvpriv(dev); 5046 5047 spin_lock_irq(&np->lock); 5048 5049 /* save vlan group */ 5050 np->vlangrp = grp; 5051 5052 if (grp) { 5053 /* enable vlan on MAC */ 5054 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; 5055 } else { 5056 /* disable vlan on MAC */ 5057 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 5058 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 5059 } 5060 5061 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 5062 5063 spin_unlock_irq(&np->lock); 5064} 5065 5066/* The mgmt unit and driver use a semaphore to access the phy during init */ 5067static int nv_mgmt_acquire_sema(struct net_device *dev) 5068{ 5069 struct fe_priv *np = netdev_priv(dev); 5070 u8 __iomem *base = get_hwbase(dev); 5071 int i; 5072 u32 tx_ctrl, mgmt_sema; 5073 5074 for (i = 0; i < 10; i++) { 5075 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; 5076 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) 5077 break; 5078 msleep(500); 5079 } 5080 5081 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) 5082 return 0; 5083 5084 for (i = 0; i < 2; i++) { 5085 tx_ctrl = readl(base + NvRegTransmitterControl); 5086 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; 5087 writel(tx_ctrl, base + NvRegTransmitterControl); 5088 5089 /* verify that semaphore was acquired */ 5090 tx_ctrl = readl(base + NvRegTransmitterControl); 5091 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && 5092 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 5093 np->mgmt_sema = 1; 5094 return 1; 5095 } 5096 else 5097 udelay(50); 5098 } 5099 5100 return 0; 5101} 5102 
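/*
 * Usage sketch (illustrative, not a verbatim caller): phy setup is
 * expected to bracket its MII accesses with this semaphore, roughly:
 *
 *	if (nv_mgmt_acquire_sema(dev)) {
 *		... program the phy via mii_rw() ...
 *		nv_mgmt_release_sema(dev);
 *	}
 *
 * If the management unit never frees its half, the loop above polls
 * for up to ~5 seconds (10 * 500 ms) before giving up and returning 0.
 */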
5103static void nv_mgmt_release_sema(struct net_device *dev) 5104{ 5105 struct fe_priv *np = netdev_priv(dev); 5106 u8 __iomem *base = get_hwbase(dev); 5107 u32 tx_ctrl; 5108 5109 if (np->driver_data & DEV_HAS_MGMT_UNIT) { 5110 if (np->mgmt_sema) { 5111 tx_ctrl = readl(base + NvRegTransmitterControl); 5112 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; 5113 writel(tx_ctrl, base + NvRegTransmitterControl); 5114 } 5115 } 5116} 5117 5118 5119static int nv_mgmt_get_version(struct net_device *dev) 5120{ 5121 struct fe_priv *np = netdev_priv(dev); 5122 u8 __iomem *base = get_hwbase(dev); 5123 u32 data_ready = readl(base + NvRegTransmitterControl); 5124 u32 data_ready2 = 0; 5125 unsigned long start; 5126 int ready = 0; 5127 5128 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); 5129 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); 5130 start = jiffies; 5131 while (time_before(jiffies, start + 5*HZ)) { 5132 data_ready2 = readl(base + NvRegTransmitterControl); 5133 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { 5134 ready = 1; 5135 break; 5136 } 5137 schedule_timeout_uninterruptible(1); 5138 } 5139 5140 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) 5141 return 0; 5142 5143 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; 5144 5145 return 1; 5146} 5147 5148static int nv_open(struct net_device *dev) 5149{ 5150 struct fe_priv *np = netdev_priv(dev); 5151 u8 __iomem *base = get_hwbase(dev); 5152 int ret = 1; 5153 int oom, i; 5154 u32 low; 5155 5156 dprintk(KERN_DEBUG "nv_open: begin\n"); 5157 5158 /* power up phy */ 5159 mii_rw(dev, np->phyaddr, MII_BMCR, 5160 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5161 5162 nv_txrx_gate(dev, false); 5163 /* erase previous misconfiguration */ 5164 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5165 nv_mac_reset(dev); 5166 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5167 writel(0, base + NvRegMulticastAddrB); 5168 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5169 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5170 writel(0, base + NvRegPacketFilterFlags); 5171 5172 writel(0, base + NvRegTransmitterControl); 5173 writel(0, base + NvRegReceiverControl); 5174 5175 writel(0, base + NvRegAdapterControl); 5176 5177 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 5178 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 5179 5180 /* initialize descriptor rings */ 5181 set_bufsize(dev); 5182 oom = nv_init_ring(dev); 5183 5184 writel(0, base + NvRegLinkSpeed); 5185 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5186 nv_txrx_reset(dev); 5187 writel(0, base + NvRegUnknownSetupReg6); 5188 5189 np->in_shutdown = 0; 5190 5191 /* give hw rings */ 5192 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5193 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5194 base + NvRegRingSizes); 5195 5196 writel(np->linkspeed, base + NvRegLinkSpeed); 5197 if (np->desc_ver == DESC_VER_1) 5198 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 5199 else 5200 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); 5201 writel(np->txrxctl_bits, base + NvRegTxRxControl); 5202 writel(np->vlanctl_bits, base + NvRegVlanControl); 5203 pci_push(base); 5204 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5205 reg_delay(dev, NvRegUnknownSetupReg5, 
NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5206 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5207 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5208 5209 writel(0, base + NvRegMIIMask); 5210 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5211 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5212 5213 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 5214 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 5215 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 5216 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 5217 5218 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); 5219 5220 get_random_bytes(&low, sizeof(low)); 5221 low &= NVREG_SLOTTIME_MASK; 5222 if (np->desc_ver == DESC_VER_1) { 5223 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); 5224 } else { 5225 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { 5226 /* setup legacy backoff */ 5227 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); 5228 } else { 5229 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); 5230 nv_gear_backoff_reseed(dev); 5231 } 5232 } 5233 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); 5234 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); 5235 if (poll_interval == -1) { 5236 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 5237 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5238 else 5239 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5240 } 5241 else 5242 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5243 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5244 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5245 base + NvRegAdapterControl); 5246 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); 5247 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); 5248 if (np->wolenabled) 5249 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5250 5251 i = readl(base + NvRegPowerState); 5252 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5253 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5254 5255 pci_push(base); 5256 udelay(10); 5257 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 5258 5259 nv_disable_hw_interrupts(dev, np->irqmask); 5260 pci_push(base); 5261 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5262 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5263 pci_push(base); 5264 5265 if (nv_request_irq(dev, 0)) { 5266 goto out_drain; 5267 } 5268 5269 /* ask for interrupts */ 5270 nv_enable_hw_interrupts(dev, np->irqmask); 5271 5272 spin_lock_irq(&np->lock); 5273 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 5274 writel(0, base + NvRegMulticastAddrB); 5275 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 5276 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 5277 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5278 /* One manual link speed update: Interrupts are enabled, future link 5279 * speed changes cause interrupts and are handled by nv_link_irq(). 
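	 * Without this one forced update, a link that came up before the
	 * MII mask was written would never raise an event and would go
	 * unnoticed.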
5280 */ 5281 { 5282 u32 miistat; 5283 miistat = readl(base + NvRegMIIStatus); 5284 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5285 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 5286 } 5287 /* set linkspeed to invalid value, thus force nv_update_linkspeed 5288 * to init hw */ 5289 np->linkspeed = 0; 5290 ret = nv_update_linkspeed(dev); 5291 nv_start_rxtx(dev); 5292 netif_start_queue(dev); 5293 nv_napi_enable(dev); 5294 5295 if (ret) { 5296 netif_carrier_on(dev); 5297 } else { 5298 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 5299 netif_carrier_off(dev); 5300 } 5301 if (oom) 5302 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5303 5304 /* start statistics timer */ 5305 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) 5306 mod_timer(&np->stats_poll, 5307 round_jiffies(jiffies + STATS_INTERVAL)); 5308 5309 spin_unlock_irq(&np->lock); 5310 5311 return 0; 5312out_drain: 5313 nv_drain_rxtx(dev); 5314 return ret; 5315} 5316 5317static int nv_close(struct net_device *dev) 5318{ 5319 struct fe_priv *np = netdev_priv(dev); 5320 u8 __iomem *base; 5321 5322 spin_lock_irq(&np->lock); 5323 np->in_shutdown = 1; 5324 spin_unlock_irq(&np->lock); 5325 nv_napi_disable(dev); 5326 synchronize_irq(np->pci_dev->irq); 5327 5328 del_timer_sync(&np->oom_kick); 5329 del_timer_sync(&np->nic_poll); 5330 del_timer_sync(&np->stats_poll); 5331 5332 netif_stop_queue(dev); 5333 spin_lock_irq(&np->lock); 5334 nv_stop_rxtx(dev); 5335 nv_txrx_reset(dev); 5336 5337 /* disable interrupts on the nic or we will lock up */ 5338 base = get_hwbase(dev); 5339 nv_disable_hw_interrupts(dev, np->irqmask); 5340 pci_push(base); 5341 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 5342 5343 spin_unlock_irq(&np->lock); 5344 5345 nv_free_irq(dev); 5346 5347 nv_drain_rxtx(dev); 5348 5349 if (np->wolenabled || !phy_power_down) { 5350 nv_txrx_gate(dev, false); 5351 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5352 nv_start_rx(dev); 5353 } else { 5354 /* power down phy */ 5355 mii_rw(dev, np->phyaddr, MII_BMCR, 5356 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); 5357 nv_txrx_gate(dev, true); 5358 } 5359 5360 5361 return 0; 5362} 5363 5364static const struct net_device_ops nv_netdev_ops = { 5365 .ndo_open = nv_open, 5366 .ndo_stop = nv_close, 5367 .ndo_get_stats = nv_get_stats, 5368 .ndo_start_xmit = nv_start_xmit, 5369 .ndo_tx_timeout = nv_tx_timeout, 5370 .ndo_change_mtu = nv_change_mtu, 5371 .ndo_validate_addr = eth_validate_addr, 5372 .ndo_set_mac_address = nv_set_mac_address, 5373 .ndo_set_multicast_list = nv_set_multicast, 5374 .ndo_vlan_rx_register = nv_vlan_rx_register, 5375#ifdef CONFIG_NET_POLL_CONTROLLER 5376 .ndo_poll_controller = nv_poll_controller, 5377#endif 5378}; 5379 5380static const struct net_device_ops nv_netdev_ops_optimized = { 5381 .ndo_open = nv_open, 5382 .ndo_stop = nv_close, 5383 .ndo_get_stats = nv_get_stats, 5384 .ndo_start_xmit = nv_start_xmit_optimized, 5385 .ndo_tx_timeout = nv_tx_timeout, 5386 .ndo_change_mtu = nv_change_mtu, 5387 .ndo_validate_addr = eth_validate_addr, 5388 .ndo_set_mac_address = nv_set_mac_address, 5389 .ndo_set_multicast_list = nv_set_multicast, 5390 .ndo_vlan_rx_register = nv_vlan_rx_register, 5391#ifdef CONFIG_NET_POLL_CONTROLLER 5392 .ndo_poll_controller = nv_poll_controller, 5393#endif 5394}; 5395 5396static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 5397{ 5398 struct net_device *dev; 5399 struct fe_priv *np; 5400 
unsigned long addr; 5401 u8 __iomem *base; 5402 int err, i; 5403 u32 powerstate, txreg; 5404 u32 phystate_orig = 0, phystate; 5405 int phyinitialized = 0; 5406 static int printed_version; 5407 5408 if (!printed_version++) 5409 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5410 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5411 5412 dev = alloc_etherdev(sizeof(struct fe_priv)); 5413 err = -ENOMEM; 5414 if (!dev) 5415 goto out; 5416 5417 np = netdev_priv(dev); 5418 np->dev = dev; 5419 np->pci_dev = pci_dev; 5420 spin_lock_init(&np->lock); 5421 SET_NETDEV_DEV(dev, &pci_dev->dev); 5422 5423 init_timer(&np->oom_kick); 5424 np->oom_kick.data = (unsigned long) dev; 5425 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 5426 init_timer(&np->nic_poll); 5427 np->nic_poll.data = (unsigned long) dev; 5428 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 5429 init_timer(&np->stats_poll); 5430 np->stats_poll.data = (unsigned long) dev; 5431 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 5432 5433 err = pci_enable_device(pci_dev); 5434 if (err) 5435 goto out_free; 5436 5437 pci_set_master(pci_dev); 5438 5439 err = pci_request_regions(pci_dev, DRV_NAME); 5440 if (err < 0) 5441 goto out_disable; 5442 5443 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) 5444 np->register_size = NV_PCI_REGSZ_VER3; 5445 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5446 np->register_size = NV_PCI_REGSZ_VER2; 5447 else 5448 np->register_size = NV_PCI_REGSZ_VER1; 5449 5450 err = -EINVAL; 5451 addr = 0; 5452 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5453 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 5454 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 5455 pci_resource_len(pci_dev, i), 5456 pci_resource_flags(pci_dev, i)); 5457 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5458 pci_resource_len(pci_dev, i) >= np->register_size) { 5459 addr = pci_resource_start(pci_dev, i); 5460 break; 5461 } 5462 } 5463 if (i == DEVICE_COUNT_RESOURCE) { 5464 dev_printk(KERN_INFO, &pci_dev->dev, 5465 "Couldn't find register window\n"); 5466 goto out_relreg; 5467 } 5468 5469 /* copy of driver data */ 5470 np->driver_data = id->driver_data; 5471 /* copy of device id */ 5472 np->device_id = id->device; 5473 5474 /* handle different descriptor versions */ 5475 if (id->driver_data & DEV_HAS_HIGH_DMA) { 5476 /* packet format 3: supports 40-bit addressing */ 5477 np->desc_ver = DESC_VER_3; 5478 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5479 if (dma_64bit) { 5480 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) 5481 dev_printk(KERN_INFO, &pci_dev->dev, 5482 "64-bit DMA failed, using 32-bit addressing\n"); 5483 else 5484 dev->features |= NETIF_F_HIGHDMA; 5485 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { 5486 dev_printk(KERN_INFO, &pci_dev->dev, 5487 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5488 } 5489 } 5490 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5491 /* packet format 2: supports jumbo frames */ 5492 np->desc_ver = DESC_VER_2; 5493 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; 5494 } else { 5495 /* original packet format */ 5496 np->desc_ver = DESC_VER_1; 5497 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; 5498 } 5499 5500 np->pkt_limit = NV_PKTLIMIT_1; 5501 if (id->driver_data & DEV_HAS_LARGEDESC) 5502 np->pkt_limit = NV_PKTLIMIT_2; 5503 5504 if (id->driver_data & DEV_HAS_CHECKSUM) { 5505 np->rx_csum = 1; 5506 np->txrxctl_bits |= 
NVREG_TXRXCTL_RXCHECK; 5507 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 5508 dev->features |= NETIF_F_TSO; 5509 dev->features |= NETIF_F_GRO; 5510 } 5511 5512 np->vlanctl_bits = 0; 5513 if (id->driver_data & DEV_HAS_VLAN) { 5514 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5515 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5516 } 5517 5518 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5519 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || 5520 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || 5521 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { 5522 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5523 } 5524 5525 5526 err = -ENOMEM; 5527 np->base = ioremap(addr, np->register_size); 5528 if (!np->base) 5529 goto out_relreg; 5530 dev->base_addr = (unsigned long)np->base; 5531 5532 dev->irq = pci_dev->irq; 5533 5534 np->rx_ring_size = RX_RING_DEFAULT; 5535 np->tx_ring_size = TX_RING_DEFAULT; 5536 5537 if (!nv_optimized(np)) { 5538 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5539 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5540 &np->ring_addr); 5541 if (!np->rx_ring.orig) 5542 goto out_unmap; 5543 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 5544 } else { 5545 np->rx_ring.ex = pci_alloc_consistent(pci_dev, 5546 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 5547 &np->ring_addr); 5548 if (!np->rx_ring.ex) 5549 goto out_unmap; 5550 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 5551 } 5552 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5553 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); 5554 if (!np->rx_skb || !np->tx_skb) 5555 goto out_freering; 5556 5557 if (!nv_optimized(np)) 5558 dev->netdev_ops = &nv_netdev_ops; 5559 else 5560 dev->netdev_ops = &nv_netdev_ops_optimized; 5561 5562 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5563 SET_ETHTOOL_OPS(dev, &ops); 5564 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5565 5566 pci_set_drvdata(pci_dev, dev); 5567 5568 /* read the mac address */ 5569 base = get_hwbase(dev); 5570 np->orig_mac[0] = readl(base + NvRegMacAddrA); 5571 np->orig_mac[1] = readl(base + NvRegMacAddrB); 5572 5573 txreg = readl(base + NvRegTransmitPoll); 5574 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { 5575 /* mac address is already in correct order */ 5576 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5577 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5578 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5579 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 5580 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5581 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5582 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { 5583 /* mac address is already in correct order */ 5584 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; 5585 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; 5586 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; 5587 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; 5588 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; 5589 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; 5590 /* 5591 * Set orig mac address back to the reversed version. 5592 * This flag will be cleared during low power transition. 5593 * Therefore, we should always put back the reversed address. 
5594 */
5595 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5596 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5597 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5598 } else {
5599 /* need to reverse mac address to correct order */
5600 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5601 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5602 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5603 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5604 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5605 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5606 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5607 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
5608 }
5609 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5610
5611 if (!is_valid_ether_addr(dev->perm_addr)) {
5612 /*
5613 * Bad mac address. At least one BIOS sets the mac address
5614 * to 01:23:45:67:89:ab
5615 */
5616 dev_printk(KERN_ERR, &pci_dev->dev,
5617 "Invalid MAC address detected: %pM\n",
5618 dev->dev_addr);
5619 dev_printk(KERN_ERR, &pci_dev->dev,
5620 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5621 random_ether_addr(dev->dev_addr);
5622 }
5623
5624 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5625 pci_name(pci_dev), dev->dev_addr);
5626
5627 /* set mac address */
5628 nv_copy_mac_to_hw(dev);
5629
5630 device_init_wakeup(&pci_dev->dev, 1);
5631
5632 /* disable WOL */
5633 writel(0, base + NvRegWakeUpFlags);
5634 np->wolenabled = 0;
5635
5636 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5637
5638 /* take phy and nic out of low power mode */
5639 powerstate = readl(base + NvRegPowerState2);
5640 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5641 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5642 pci_dev->revision >= 0xA3)
5643 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5644 writel(powerstate, base + NvRegPowerState2);
5645 }
5646
5647 if (np->desc_ver == DESC_VER_1) {
5648 np->tx_flags = NV_TX_VALID;
5649 } else {
5650 np->tx_flags = NV_TX2_VALID;
5651 }
5652
5653 np->msi_flags = 0;
5654 if ((id->driver_data & DEV_HAS_MSI) && msi) {
5655 np->msi_flags |= NV_MSI_CAPABLE;
5656 }
5657 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5658 /* MSI-X has had reported issues when the irqmask is modified
5659 * (as with napi), so NV_MSI_X_CAPABLE is deliberately left
5660 * unset for now */
5661 }
5662
5663 if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5664 np->irqmask = NVREG_IRQMASK_CPU;
5665 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5666 np->msi_flags |= 0x0001;
5667 } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5668 !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5669 /* start off in throughput mode */
5670 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5671 /* remove support for msix mode */
5672 np->msi_flags &= ~NV_MSI_X_CAPABLE;
5673 } else {
5674 optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5675 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5676 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5677 np->msi_flags |= 0x0003;
5678 }
5679
5680 if (id->driver_data & DEV_NEED_TIMERIRQ)
5681 np->irqmask |= NVREG_IRQ_TIMER;
5682 if (id->driver_data & DEV_NEED_LINKTIMER) {
5683 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5684 np->need_linktimer = 1;
5685 np->link_timeout = jiffies + LINK_TIMEOUT;
5686 } else {
5687 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5688 np->need_linktimer = 0;
5689 }
5690
5691 /* Limit the
number of outstanding tx descriptors to work around a hw bug */
5692 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5693 np->tx_limit = 1;
5694 if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5695 pci_dev->revision >= 0xA2)
5696 np->tx_limit = 0;
5697 }
5698
5699 /* clear phy state and temporarily halt phy interrupts */
5700 writel(0, base + NvRegMIIMask);
5701 phystate = readl(base + NvRegAdapterControl);
5702 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5703 phystate_orig = 1;
5704 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5705 writel(phystate, base + NvRegAdapterControl);
5706 }
5707 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5708
5709 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5710 /* management unit running on the mac? */
5711 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5712 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5713 nv_mgmt_acquire_sema(dev) &&
5714 nv_mgmt_get_version(dev)) {
5715 np->mac_in_use = 1;
5716 if (np->mgmt_version > 0) {
5717 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5718 }
5719 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5720 pci_name(pci_dev), np->mac_in_use);
5721 /* has the management unit already set up the phy? */
5722 if (np->mac_in_use &&
5723 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5724 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5725 /* phy was already initialized by the mgmt unit */
5726 phyinitialized = 1;
5727 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5728 pci_name(pci_dev));
5729 } else {
5730 /* phy not initialized by the mgmt unit; phy_init() below will handle it */
5731 }
5732 }
5733 }
5734
5735 /* find a suitable phy */
5736 for (i = 1; i <= 32; i++) {
5737 int id1, id2;
5738 int phyaddr = i & 0x1F;
5739
5740 spin_lock_irq(&np->lock);
5741 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5742 spin_unlock_irq(&np->lock);
5743 if (id1 < 0 || id1 == 0xffff)
5744 continue;
5745 spin_lock_irq(&np->lock);
5746 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5747 spin_unlock_irq(&np->lock);
5748 if (id2 < 0 || id2 == 0xffff)
5749 continue;
5750
5751 np->phy_model = id2 & PHYID2_MODEL_MASK;
5752 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5753 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5754 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5755 pci_name(pci_dev), id1, id2, phyaddr);
5756 np->phyaddr = phyaddr;
5757 np->phy_oui = id1 | id2;
5758
5759 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5760 if (np->phy_oui == PHY_OUI_REALTEK2)
5761 np->phy_oui = PHY_OUI_REALTEK;
5762 /* Setup phy revision for Realtek */
5763 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5764 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5765
5766 break;
5767 }
5768 if (i == 33) {
5769 dev_printk(KERN_INFO, &pci_dev->dev,
5770 "open: Could not find a valid PHY.\n");
5771 goto out_error;
5772 }
5773
5774 if (!phyinitialized) {
5775 /* reset it */
5776 phy_init(dev);
5777 } else {
5778 /* see if it is a gigabit phy */
5779 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5780 if (mii_status & PHY_GIGABIT) {
5781 np->gigabit = PHY_GIGABIT;
5782 }
5783 }
5784
5785 /* set default link speed settings */
5786 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5787 np->duplex = 0;
5788 np->autoneg = 1;
5789
5790 err = register_netdev(dev);
5791 if (err) {
5792 dev_printk(KERN_INFO, &pci_dev->dev,
5793 "unable to register netdev: %d\n", err);
5794 goto out_error;
5795 }
5796
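/* registration succeeded: the two messages below summarize the probed
 * MAC address, PHY, and the feature set selected for this chip */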
5797 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " 5798 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 5799 dev->name, 5800 np->phy_oui, 5801 np->phyaddr, 5802 dev->dev_addr[0], 5803 dev->dev_addr[1], 5804 dev->dev_addr[2], 5805 dev->dev_addr[3], 5806 dev->dev_addr[4], 5807 dev->dev_addr[5]); 5808 5809 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5810 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5811 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 5812 "csum " : "", 5813 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5814 "vlan " : "", 5815 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", 5816 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", 5817 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", 5818 np->gigabit == PHY_GIGABIT ? "gbit " : "", 5819 np->need_linktimer ? "lnktim " : "", 5820 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", 5821 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", 5822 np->desc_ver); 5823 5824 return 0; 5825 5826out_error: 5827 if (phystate_orig) 5828 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 5829 pci_set_drvdata(pci_dev, NULL); 5830out_freering: 5831 free_rings(dev); 5832out_unmap: 5833 iounmap(get_hwbase(dev)); 5834out_relreg: 5835 pci_release_regions(pci_dev); 5836out_disable: 5837 pci_disable_device(pci_dev); 5838out_free: 5839 free_netdev(dev); 5840out: 5841 return err; 5842} 5843 5844static void nv_restore_phy(struct net_device *dev) 5845{ 5846 struct fe_priv *np = netdev_priv(dev); 5847 u16 phy_reserved, mii_control; 5848 5849 if (np->phy_oui == PHY_OUI_REALTEK && 5850 np->phy_model == PHY_MODEL_REALTEK_8201 && 5851 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { 5852 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); 5853 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); 5854 phy_reserved &= ~PHY_REALTEK_INIT_MSK1; 5855 phy_reserved |= PHY_REALTEK_INIT8; 5856 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); 5857 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); 5858 5859 /* restart auto negotiation */ 5860 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 5861 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 5862 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); 5863 } 5864} 5865 5866static void nv_restore_mac_addr(struct pci_dev *pci_dev) 5867{ 5868 struct net_device *dev = pci_get_drvdata(pci_dev); 5869 struct fe_priv *np = netdev_priv(dev); 5870 u8 __iomem *base = get_hwbase(dev); 5871 5872 /* special op: write back the misordered MAC address - otherwise 5873 * the next nv_probe would see a wrong address. 
5874 */
5875 writel(np->orig_mac[0], base + NvRegMacAddrA);
5876 writel(np->orig_mac[1], base + NvRegMacAddrB);
5877 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5878 base + NvRegTransmitPoll);
5879 }
5880
5881 static void __devexit nv_remove(struct pci_dev *pci_dev)
5882 {
5883 struct net_device *dev = pci_get_drvdata(pci_dev);
5884
5885 unregister_netdev(dev);
5886
5887 nv_restore_mac_addr(pci_dev);
5888
5889 /* restore any phy related changes */
5890 nv_restore_phy(dev);
5891
5892 nv_mgmt_release_sema(dev);
5893
5894 /* free all structures */
5895 free_rings(dev);
5896 iounmap(get_hwbase(dev));
5897 pci_release_regions(pci_dev);
5898 pci_disable_device(pci_dev);
5899 free_netdev(dev);
5900 pci_set_drvdata(pci_dev, NULL);
5901 }
5902
5903 #ifdef CONFIG_PM
5904 static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5905 {
5906 struct net_device *dev = pci_get_drvdata(pdev);
5907 struct fe_priv *np = netdev_priv(dev);
5908 u8 __iomem *base = get_hwbase(dev);
5909 int i;
5910
5911 if (netif_running(dev)) {
5912 /* Gross. */
5913 nv_close(dev);
5914 }
5915 netif_device_detach(dev);
5916
5917 /* save non-pci configuration space */
5918 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5919 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5920
5921 pci_save_state(pdev);
5922 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5923 pci_disable_device(pdev);
5924 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5925 return 0;
5926 }
5927
5928 static int nv_resume(struct pci_dev *pdev)
5929 {
5930 struct net_device *dev = pci_get_drvdata(pdev);
5931 struct fe_priv *np = netdev_priv(dev);
5932 u8 __iomem *base = get_hwbase(dev);
5933 int i, rc = 0;
5934
5935 pci_set_power_state(pdev, PCI_D0);
5936 pci_restore_state(pdev);
5937 /* ack any pending wake events, disable PME */
5938 pci_enable_wake(pdev, PCI_D0, 0);
5939
5940 /* restore non-pci configuration space */
5941 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5942 writel(np->saved_config_space[i], base + i*sizeof(u32));
5943
5944 if (np->driver_data & DEV_NEED_MSI_FIX)
5945 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
5946
5947 /* restore phy state, including autoneg */
5948 phy_init(dev);
5949
5950 netif_device_attach(dev);
5951 if (netif_running(dev)) {
5952 rc = nv_open(dev);
5953 nv_set_multicast(dev);
5954 }
5955 return rc;
5956 }
5957
5958 static void nv_shutdown(struct pci_dev *pdev)
5959 {
5960 struct net_device *dev = pci_get_drvdata(pdev);
5961 struct fe_priv *np = netdev_priv(dev);
5962
5963 if (netif_running(dev))
5964 nv_close(dev);
5965
5966 /*
5967 * Restore the MAC so a kernel started by kexec won't get confused.
5968 * If we really go for poweroff, we must not restore the MAC,
5969 * otherwise the MAC for WOL will be reversed at least on some boards.
5970 */
5971 if (system_state != SYSTEM_POWER_OFF) {
5972 nv_restore_mac_addr(pdev);
5973 }
5974
5975 pci_disable_device(pdev);
5976 /*
5977 * Apparently it is not possible to reinitialise from D3 hot,
5978 * only put the device into D3 if we really go for poweroff.
5979 */ 5980 if (system_state == SYSTEM_POWER_OFF) { 5981 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled)) 5982 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled); 5983 pci_set_power_state(pdev, PCI_D3hot); 5984 } 5985} 5986#else 5987#define nv_suspend NULL 5988#define nv_shutdown NULL 5989#define nv_resume NULL 5990#endif /* CONFIG_PM */ 5991 5992static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { 5993 { /* nForce Ethernet Controller */ 5994 PCI_DEVICE(0x10DE, 0x01C3), 5995 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 5996 }, 5997 { /* nForce2 Ethernet Controller */ 5998 PCI_DEVICE(0x10DE, 0x0066), 5999 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6000 }, 6001 { /* nForce3 Ethernet Controller */ 6002 PCI_DEVICE(0x10DE, 0x00D6), 6003 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6004 }, 6005 { /* nForce3 Ethernet Controller */ 6006 PCI_DEVICE(0x10DE, 0x0086), 6007 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6008 }, 6009 { /* nForce3 Ethernet Controller */ 6010 PCI_DEVICE(0x10DE, 0x008C), 6011 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6012 }, 6013 { /* nForce3 Ethernet Controller */ 6014 PCI_DEVICE(0x10DE, 0x00E6), 6015 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6016 }, 6017 { /* nForce3 Ethernet Controller */ 6018 PCI_DEVICE(0x10DE, 0x00DF), 6019 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, 6020 }, 6021 { /* CK804 Ethernet Controller */ 6022 PCI_DEVICE(0x10DE, 0x0056), 6023 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6024 }, 6025 { /* CK804 Ethernet Controller */ 6026 PCI_DEVICE(0x10DE, 0x0057), 6027 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6028 }, 6029 { /* MCP04 Ethernet Controller */ 6030 PCI_DEVICE(0x10DE, 0x0037), 6031 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6032 }, 6033 { /* MCP04 Ethernet Controller */ 6034 PCI_DEVICE(0x10DE, 0x0038), 6035 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, 6036 }, 6037 { /* MCP51 Ethernet Controller */ 6038 PCI_DEVICE(0x10DE, 0x0268), 6039 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, 6040 }, 6041 { /* MCP51 Ethernet Controller */ 6042 PCI_DEVICE(0x10DE, 0x0269), 6043 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, 6044 }, 6045 { /* MCP55 Ethernet Controller */ 6046 PCI_DEVICE(0x10DE, 0x0372), 6047 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6048 }, 6049 { /* MCP55 Ethernet Controller */ 6050 PCI_DEVICE(0x10DE, 0x0373), 6051 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, 6052 }, 6053 { /* MCP61 Ethernet Controller */ 6054 PCI_DEVICE(0x10DE, 0x03E5), 6055 
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6056 }, 6057 { /* MCP61 Ethernet Controller */ 6058 PCI_DEVICE(0x10DE, 0x03E6), 6059 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6060 }, 6061 { /* MCP61 Ethernet Controller */ 6062 PCI_DEVICE(0x10DE, 0x03EE), 6063 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6064 }, 6065 { /* MCP61 Ethernet Controller */ 6066 PCI_DEVICE(0x10DE, 0x03EF), 6067 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, 6068 }, 6069 { /* MCP65 Ethernet Controller */ 6070 PCI_DEVICE(0x10DE, 0x0450), 6071 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6072 }, 6073 { /* MCP65 Ethernet Controller */ 6074 PCI_DEVICE(0x10DE, 0x0451), 6075 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6076 }, 6077 { /* MCP65 Ethernet Controller */ 6078 PCI_DEVICE(0x10DE, 0x0452), 6079 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6080 }, 6081 { /* MCP65 Ethernet Controller */ 6082 PCI_DEVICE(0x10DE, 0x0453), 6083 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6084 }, 6085 { /* MCP67 Ethernet Controller */ 6086 PCI_DEVICE(0x10DE, 0x054C), 6087 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6088 }, 6089 { /* MCP67 Ethernet Controller */ 6090 PCI_DEVICE(0x10DE, 0x054D), 6091 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6092 }, 6093 { /* MCP67 Ethernet Controller */ 6094 PCI_DEVICE(0x10DE, 0x054E), 6095 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6096 }, 6097 { /* MCP67 Ethernet Controller */ 6098 PCI_DEVICE(0x10DE, 0x054F), 6099 
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6100 }, 6101 { /* MCP73 Ethernet Controller */ 6102 PCI_DEVICE(0x10DE, 0x07DC), 6103 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6104 }, 6105 { /* MCP73 Ethernet Controller */ 6106 PCI_DEVICE(0x10DE, 0x07DD), 6107 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6108 }, 6109 { /* MCP73 Ethernet Controller */ 6110 PCI_DEVICE(0x10DE, 0x07DE), 6111 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6112 }, 6113 { /* MCP73 Ethernet Controller */ 6114 PCI_DEVICE(0x10DE, 0x07DF), 6115 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, 6116 }, 6117 { /* MCP77 Ethernet Controller */ 6118 PCI_DEVICE(0x10DE, 0x0760), 6119 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6120 }, 6121 { /* MCP77 Ethernet Controller */ 6122 PCI_DEVICE(0x10DE, 0x0761), 6123 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6124 }, 6125 { /* MCP77 Ethernet Controller */ 6126 PCI_DEVICE(0x10DE, 0x0762), 6127 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6128 }, 6129 { /* MCP77 Ethernet Controller */ 6130 PCI_DEVICE(0x10DE, 0x0763), 6131 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, 6132 }, 6133 { /* MCP79 Ethernet Controller */ 6134 PCI_DEVICE(0x10DE, 0x0AB0), 6135 .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6136 },
6137 { /* MCP79 Ethernet Controller */
6138 PCI_DEVICE(0x10DE, 0x0AB1),
6139 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6140 },
6141 { /* MCP79 Ethernet Controller */
6142 PCI_DEVICE(0x10DE, 0x0AB2),
6143 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6144 },
6145 { /* MCP79 Ethernet Controller */
6146 PCI_DEVICE(0x10DE, 0x0AB3),
6147 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6148 },
6149 { /* MCP89 Ethernet Controller */
6150 PCI_DEVICE(0x10DE, 0x0D7D),
6151 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6152 },
6153 {0,},
6154 };
6155
6156 static struct pci_driver driver = {
6157 .name = DRV_NAME,
6158 .id_table = pci_tbl,
6159 .probe = nv_probe,
6160 .remove = __devexit_p(nv_remove),
6161 .suspend = nv_suspend,
6162 .resume = nv_resume,
6163 .shutdown = nv_shutdown,
6164 };
6165
6166 static int __init init_nic(void)
6167 {
6168 return pci_register_driver(&driver);
6169 }
6170
6171 static void __exit exit_nic(void)
6172 {
6173 pci_unregister_driver(&driver);
6174 }
6175
6176 module_param(max_interrupt_work, int, 0);
6177 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6178 module_param(optimization_mode, int, 0);
6179 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6180 module_param(poll_interval, int, 0);
6181 MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)].
Min is 0 and Max is 65535."); 6182module_param(msi, int, 0); 6183MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); 6184module_param(msix, int, 0); 6185MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); 6186module_param(dma_64bit, int, 0); 6187MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 6188module_param(phy_cross, int, 0); 6189MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); 6190module_param(phy_power_down, int, 0); 6191MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); 6192 6193MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 6194MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 6195MODULE_LICENSE("GPL"); 6196 6197MODULE_DEVICE_TABLE(pci, pci_tbl); 6198 6199module_init(init_nic); 6200module_exit(exit_nic); 6201
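/*
 * Worked example for the poll_interval formula above (illustrative values
 * only, not taken from the original sources): since
 * interval = (time_in_micro_secs * 100) / (2^10), loading the module with e.g.
 *
 *	modprobe forcedeth optimization_mode=1 poll_interval=97
 *
 * selects CPU mode with a timer period of about 97 * 2^10 / 100 = ~993
 * microseconds, i.e. roughly one timer interrupt per millisecond.
 */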