1#define VERSION "0.23" 2//#define dprintk printk 3#define dprintk(x...) do { } while (0) 4 5#include <linux/module.h> 6#include <linux/moduleparam.h> 7#include <linux/types.h> 8#include <linux/pci.h> 9#include <linux/dma-mapping.h> 10#include <linux/netdevice.h> 11#include <linux/etherdevice.h> 12#include <linux/delay.h> 13#include <linux/workqueue.h> 14#include <linux/init.h> 15#include <linux/ip.h> /* for iph */ 16#include <linux/in.h> /* for IPPROTO_... */ 17#include <linux/compiler.h> 18#include <linux/prefetch.h> 19#include <linux/ethtool.h> 20#include <linux/sched.h> 21#include <linux/timer.h> 22#include <linux/if_vlan.h> 23#include <linux/rtnetlink.h> 24#include <linux/jiffies.h> 25#include <linux/slab.h> 26 27#include <asm/io.h> 28#include <asm/uaccess.h> 29#include <asm/system.h> 30 31#define DRV_NAME "ns83820" 32 33/* Global parameters. See module_param near the bottom. */ 34static int ihr = 2; 35static int reset_phy = 0; 36static int lnksts = 0; /* CFG_LNKSTS bit polarity */ 37 38/* Dprintk is used for more interesting debug events */ 39#undef Dprintk 40#define Dprintk dprintk 41 42/* tunables */ 43#define RX_BUF_SIZE 1500 /* 8192 */ 44#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 45#define NS83820_VLAN_ACCEL_SUPPORT 46#endif 47 48/* Must not exceed ~65000. */ 49#define NR_RX_DESC 64 50#define NR_TX_DESC 128 51 52/* not tunable */ 53#define REAL_RX_BUF_SIZE (RX_BUF_SIZE + 14) /* rx/tx mac addr + type */ 54 55#define MIN_TX_DESC_FREE 8 56 57/* register defines */ 58#define CFGCS 0x04 59 60#define CR_TXE 0x00000001 61#define CR_TXD 0x00000002 62/* Ramit : Here's a tip, don't do a RXD immediately followed by an RXE 63 * The Receive engine skips one descriptor and moves 64 * onto the next one!! 
*/ 65#define CR_RXE 0x00000004 66#define CR_RXD 0x00000008 67#define CR_TXR 0x00000010 68#define CR_RXR 0x00000020 69#define CR_SWI 0x00000080 70#define CR_RST 0x00000100 71 72#define PTSCR_EEBIST_FAIL 0x00000001 73#define PTSCR_EEBIST_EN 0x00000002 74#define PTSCR_EELOAD_EN 0x00000004 75#define PTSCR_RBIST_FAIL 0x000001b8 76#define PTSCR_RBIST_DONE 0x00000200 77#define PTSCR_RBIST_EN 0x00000400 78#define PTSCR_RBIST_RST 0x00002000 79 80#define MEAR_EEDI 0x00000001 81#define MEAR_EEDO 0x00000002 82#define MEAR_EECLK 0x00000004 83#define MEAR_EESEL 0x00000008 84#define MEAR_MDIO 0x00000010 85#define MEAR_MDDIR 0x00000020 86#define MEAR_MDC 0x00000040 87 88#define ISR_TXDESC3 0x40000000 89#define ISR_TXDESC2 0x20000000 90#define ISR_TXDESC1 0x10000000 91#define ISR_TXDESC0 0x08000000 92#define ISR_RXDESC3 0x04000000 93#define ISR_RXDESC2 0x02000000 94#define ISR_RXDESC1 0x01000000 95#define ISR_RXDESC0 0x00800000 96#define ISR_TXRCMP 0x00400000 97#define ISR_RXRCMP 0x00200000 98#define ISR_DPERR 0x00100000 99#define ISR_SSERR 0x00080000 100#define ISR_RMABT 0x00040000 101#define ISR_RTABT 0x00020000 102#define ISR_RXSOVR 0x00010000 103#define ISR_HIBINT 0x00008000 104#define ISR_PHY 0x00004000 105#define ISR_PME 0x00002000 106#define ISR_SWI 0x00001000 107#define ISR_MIB 0x00000800 108#define ISR_TXURN 0x00000400 109#define ISR_TXIDLE 0x00000200 110#define ISR_TXERR 0x00000100 111#define ISR_TXDESC 0x00000080 112#define ISR_TXOK 0x00000040 113#define ISR_RXORN 0x00000020 114#define ISR_RXIDLE 0x00000010 115#define ISR_RXEARLY 0x00000008 116#define ISR_RXERR 0x00000004 117#define ISR_RXDESC 0x00000002 118#define ISR_RXOK 0x00000001 119 120#define TXCFG_CSI 0x80000000 121#define TXCFG_HBI 0x40000000 122#define TXCFG_MLB 0x20000000 123#define TXCFG_ATP 0x10000000 124#define TXCFG_ECRETRY 0x00800000 125#define TXCFG_BRST_DIS 0x00080000 126#define TXCFG_MXDMA1024 0x00000000 127#define TXCFG_MXDMA512 0x00700000 128#define TXCFG_MXDMA256 0x00600000 129#define TXCFG_MXDMA128 
0x00500000 130#define TXCFG_MXDMA64 0x00400000 131#define TXCFG_MXDMA32 0x00300000 132#define TXCFG_MXDMA16 0x00200000 133#define TXCFG_MXDMA8 0x00100000 134 135#define CFG_LNKSTS 0x80000000 136#define CFG_SPDSTS 0x60000000 137#define CFG_SPDSTS1 0x40000000 138#define CFG_SPDSTS0 0x20000000 139#define CFG_DUPSTS 0x10000000 140#define CFG_TBI_EN 0x01000000 141#define CFG_MODE_1000 0x00400000 142/* Ramit : Dont' ever use AUTO_1000, it never works and is buggy. 143 * Read the Phy response and then configure the MAC accordingly */ 144#define CFG_AUTO_1000 0x00200000 145#define CFG_PINT_CTL 0x001c0000 146#define CFG_PINT_DUPSTS 0x00100000 147#define CFG_PINT_LNKSTS 0x00080000 148#define CFG_PINT_SPDSTS 0x00040000 149#define CFG_TMRTEST 0x00020000 150#define CFG_MRM_DIS 0x00010000 151#define CFG_MWI_DIS 0x00008000 152#define CFG_T64ADDR 0x00004000 153#define CFG_PCI64_DET 0x00002000 154#define CFG_DATA64_EN 0x00001000 155#define CFG_M64ADDR 0x00000800 156#define CFG_PHY_RST 0x00000400 157#define CFG_PHY_DIS 0x00000200 158#define CFG_EXTSTS_EN 0x00000100 159#define CFG_REQALG 0x00000080 160#define CFG_SB 0x00000040 161#define CFG_POW 0x00000020 162#define CFG_EXD 0x00000010 163#define CFG_PESEL 0x00000008 164#define CFG_BROM_DIS 0x00000004 165#define CFG_EXT_125 0x00000002 166#define CFG_BEM 0x00000001 167 168#define EXTSTS_UDPPKT 0x00200000 169#define EXTSTS_TCPPKT 0x00080000 170#define EXTSTS_IPPKT 0x00020000 171#define EXTSTS_VPKT 0x00010000 172#define EXTSTS_VTG_MASK 0x0000ffff 173 174#define SPDSTS_POLARITY (CFG_SPDSTS1 | CFG_SPDSTS0 | CFG_DUPSTS | (lnksts ? 
CFG_LNKSTS : 0)) 175 176#define MIBC_MIBS 0x00000008 177#define MIBC_ACLR 0x00000004 178#define MIBC_FRZ 0x00000002 179#define MIBC_WRN 0x00000001 180 181#define PCR_PSEN (1 << 31) 182#define PCR_PS_MCAST (1 << 30) 183#define PCR_PS_DA (1 << 29) 184#define PCR_STHI_8 (3 << 23) 185#define PCR_STLO_4 (1 << 23) 186#define PCR_FFHI_8K (3 << 21) 187#define PCR_FFLO_4K (1 << 21) 188#define PCR_PAUSE_CNT 0xFFFE 189 190#define RXCFG_AEP 0x80000000 191#define RXCFG_ARP 0x40000000 192#define RXCFG_STRIPCRC 0x20000000 193#define RXCFG_RX_FD 0x10000000 194#define RXCFG_ALP 0x08000000 195#define RXCFG_AIRL 0x04000000 196#define RXCFG_MXDMA512 0x00700000 197#define RXCFG_DRTH 0x0000003e 198#define RXCFG_DRTH0 0x00000002 199 200#define RFCR_RFEN 0x80000000 201#define RFCR_AAB 0x40000000 202#define RFCR_AAM 0x20000000 203#define RFCR_AAU 0x10000000 204#define RFCR_APM 0x08000000 205#define RFCR_APAT 0x07800000 206#define RFCR_APAT3 0x04000000 207#define RFCR_APAT2 0x02000000 208#define RFCR_APAT1 0x01000000 209#define RFCR_APAT0 0x00800000 210#define RFCR_AARP 0x00400000 211#define RFCR_MHEN 0x00200000 212#define RFCR_UHEN 0x00100000 213#define RFCR_ULM 0x00080000 214 215#define VRCR_RUDPE 0x00000080 216#define VRCR_RTCPE 0x00000040 217#define VRCR_RIPE 0x00000020 218#define VRCR_IPEN 0x00000010 219#define VRCR_DUTF 0x00000008 220#define VRCR_DVTF 0x00000004 221#define VRCR_VTREN 0x00000002 222#define VRCR_VTDEN 0x00000001 223 224#define VTCR_PPCHK 0x00000008 225#define VTCR_GCHK 0x00000004 226#define VTCR_VPPTI 0x00000002 227#define VTCR_VGTI 0x00000001 228 229#define CR 0x00 230#define CFG 0x04 231#define MEAR 0x08 232#define PTSCR 0x0c 233#define ISR 0x10 234#define IMR 0x14 235#define IER 0x18 236#define IHR 0x1c 237#define TXDP 0x20 238#define TXDP_HI 0x24 239#define TXCFG 0x28 240#define GPIOR 0x2c 241#define RXDP 0x30 242#define RXDP_HI 0x34 243#define RXCFG 0x38 244#define PQCR 0x3c 245#define WCSR 0x40 246#define PCR 0x44 247#define RFCR 0x48 248#define RFDR 0x4c 249 
250#define SRR 0x58 251 252#define VRCR 0xbc 253#define VTCR 0xc0 254#define VDR 0xc4 255#define CCSR 0xcc 256 257#define TBICR 0xe0 258#define TBISR 0xe4 259#define TANAR 0xe8 260#define TANLPAR 0xec 261#define TANER 0xf0 262#define TESR 0xf4 263 264#define TBICR_MR_AN_ENABLE 0x00001000 265#define TBICR_MR_RESTART_AN 0x00000200 266 267#define TBISR_MR_LINK_STATUS 0x00000020 268#define TBISR_MR_AN_COMPLETE 0x00000004 269 270#define TANAR_PS2 0x00000100 271#define TANAR_PS1 0x00000080 272#define TANAR_HALF_DUP 0x00000040 273#define TANAR_FULL_DUP 0x00000020 274 275#define GPIOR_GP5_OE 0x00000200 276#define GPIOR_GP4_OE 0x00000100 277#define GPIOR_GP3_OE 0x00000080 278#define GPIOR_GP2_OE 0x00000040 279#define GPIOR_GP1_OE 0x00000020 280#define GPIOR_GP3_OUT 0x00000004 281#define GPIOR_GP1_OUT 0x00000001 282 283#define LINK_AUTONEGOTIATE 0x01 284#define LINK_DOWN 0x02 285#define LINK_UP 0x04 286 287#define HW_ADDR_LEN sizeof(dma_addr_t) 288#define desc_addr_set(desc, addr) \ 289 do { \ 290 ((desc)[0] = cpu_to_le32(addr)); \ 291 if (HW_ADDR_LEN == 8) \ 292 (desc)[1] = cpu_to_le32(((u64)addr) >> 32); \ 293 } while(0) 294#define desc_addr_get(desc) \ 295 (le32_to_cpu((desc)[0]) | \ 296 (HW_ADDR_LEN == 8 ? 
	 ((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0))

/* Word offsets of the fields inside a hardware descriptor.  The link and
 * buffer pointers occupy one or two 32bit words depending on whether the
 * chip is run with 64bit addressing (HW_ADDR_LEN). */
#define DESC_LINK		0
#define DESC_BUFPTR	(DESC_LINK + HW_ADDR_LEN/4)
#define DESC_CMDSTS	(DESC_BUFPTR + HW_ADDR_LEN/4)
#define DESC_EXTSTS	(DESC_CMDSTS + 4/4)

/* cmdsts word: ownership/handshake bits plus the buffer length. */
#define CMDSTS_OWN	0x80000000	/* descriptor owned by the hardware */
#define CMDSTS_MORE	0x40000000	/* more fragments follow */
#define CMDSTS_INTR	0x20000000	/* raise an interrupt on completion */
#define CMDSTS_ERR	0x10000000
#define CMDSTS_OK	0x08000000
#define CMDSTS_RUNT	0x00200000
#define CMDSTS_LEN_MASK	0x0000ffff

#define CMDSTS_DEST_MASK	0x01800000
#define CMDSTS_DEST_SELF	0x00800000
#define CMDSTS_DEST_MULTI	0x01000000

#define DESC_SIZE	8		/* Should be cache line sized */

/* Per-device receive state: the rx descriptor chain, the skbs posted to
 * it, and the producer/consumer indices. */
struct rx_info {
	spinlock_t lock;		/* protects the whole rx ring state */
	int up;				/* receiver enabled */
	unsigned long idle;		/* bit 0 set when rx engine went idle */

	struct sk_buff *skbs[NR_RX_DESC];	/* skb posted at each slot */

	__le32 *next_rx_desc;		/* descriptor for next_rx */
	u16 next_rx, next_empty;	/* consumer / producer indices */

	__le32 *descs;			/* descriptor array (CPU view) */
	dma_addr_t phy_descs;		/* descriptor array (bus view) */
};


/* Main per-adapter private structure, hung off the net_device. */
struct ns83820 {
	u8 __iomem *base;		/* mapped chip registers */

	struct pci_dev *pci_dev;
	struct net_device *ndev;

#ifdef NS83820_VLAN_ACCEL_SUPPORT
	struct vlan_group *vlgrp;
#endif

	struct rx_info rx_info;
	struct tasklet_struct rx_tasklet;

	unsigned ihr;			/* interrupt holdoff value */
	struct work_struct tq_refill;	/* deferred GFP_KERNEL rx refill */

	/* protects everything below. irqsave when using.
	 */
	spinlock_t misc_lock;

	u32 CFG_cache;			/* shadow of the CFG register */

	u32 MEAR_cache;			/* shadow of MEAR (eeprom/mii pins) */
	u32 IMR_cache;			/* shadow of the interrupt mask */

	unsigned linkstate;		/* LINK_UP / LINK_DOWN / autoneg */

	spinlock_t tx_lock;

	u16 tx_done_idx;		/* first descriptor not yet reaped */
	u16 tx_idx;
	volatile u16 tx_free_idx;	/* idx of free desc chain */
	u16 tx_intr_idx;		/* slot at which to request a tx irq */

	atomic_t nr_tx_skbs;
	struct sk_buff *tx_skbs[NR_TX_DESC];

	char pad[16] __attribute__((aligned(16)));
	__le32 *tx_descs;		/* tx descriptor array (CPU view) */
	dma_addr_t tx_phy_descs;	/* tx descriptor array (bus view) */

	struct timer_list tx_watchdog;
};

/* Fetch our private state from a net_device. */
static inline struct ns83820 *PRIV(struct net_device *dev)
{
	return netdev_priv(dev);
}

#define __kick_rx(dev)	writel(CR_RXE, dev->base + CR)

/* Restart the receive engine if it had gone idle (idle bit set by the
 * RXIDLE interrupt); reload RXDP with the next descriptor first. */
static inline void kick_rx(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	dprintk("kick_rx: maybe kicking\n");
	if (test_and_clear_bit(0, &dev->rx_info.idle)) {
		dprintk("actually kicking\n");
		writel(dev->rx_info.phy_descs +
			(4 * DESC_SIZE * dev->rx_info.next_rx),
		       dev->base + RXDP);
		/* next_rx == next_empty means the chain has no posted
		 * buffers at all - should not happen while up. */
		if (dev->rx_info.next_rx == dev->rx_info.next_empty)
			printk(KERN_DEBUG "%s: uh-oh: next_rx == next_empty???\n",
				ndev->name);
		__kick_rx(dev);
	}
}

//free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC
#define start_tx_okay(dev)	\
	(((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)


#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* Record the vlan group; both locks taken so neither the tx path nor the
 * misc paths see a half-updated pointer. */
static void ns83820_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ns83820 *dev = PRIV(ndev);

	spin_lock_irq(&dev->misc_lock);
	spin_lock(&dev->tx_lock);

	dev->vlgrp = grp;

	spin_unlock(&dev->tx_lock);
	spin_unlock_irq(&dev->misc_lock);
}
#endif

/* Packet Receiver
 *
 * The hardware supports linked lists of receive descriptors for
 * which ownership is transfered back and forth by means of an
 * ownership bit.
 * While the hardware does support the use of a
 * ring for receive descriptors, we only make use of a chain in
 * an attempt to reduce bus traffic under heavy load scenarios.
 * This will also make bugs a bit more obvious.  The current code
 * only makes use of a single rx chain; I hope to implement
 * priority based rx for version 1.0.  Goal: even under overload
 * conditions, still route realtime traffic with as low jitter as
 * possible.
 */
/* Fill in one hardware descriptor.  extsts is written before the memory
 * barrier and cmdsts after it, so the chip never sees a valid OWN/size
 * word paired with stale pointers. */
static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
{
	desc_addr_set(desc + DESC_LINK, link);
	desc_addr_set(desc + DESC_BUFPTR, buf);
	desc[DESC_EXTSTS] = cpu_to_le32(extsts);
	mb();
	desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
}

/* Number of free (unposted) rx descriptor slots, keeping a 2 slot gap
 * between producer and consumer. */
#define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_DESC)

/* Post one skb as a receive buffer at the next empty slot.  Consumes the
 * skb (frees it on failure).  Returns 0 on success, 1 if the chain is
 * full.  Caller holds rx_info.lock. */
static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
{
	unsigned next_empty;
	u32 cmdsts;
	__le32 *sg;
	dma_addr_t buf;

	next_empty = dev->rx_info.next_empty;

	/* don't overrun last rx marker */
	if (unlikely(nr_rx_empty(dev) <= 2)) {
		kfree_skb(skb);
		return 1;
	}


	sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
	BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
	dev->rx_info.skbs[next_empty] = skb;

	dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
	cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
	buf = pci_map_single(dev->pci_dev, skb->data,
			     REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
	build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
	/* update link of previous rx */
	if (likely(next_empty != dev->rx_info.next_rx))
		dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4));

	return 0;
}

/* Refill the rx chain with freshly allocated skbs.  With GFP_ATOMIC the
 * lock is held across the whole loop; otherwise it is taken only around
 * each ns83820_add_rx_skb() call so the allocator may sleep.  Returns 0
 * if at least one buffer was posted (or none were needed), -ENOMEM if
 * the very first allocation failed. */
static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
{
	struct ns83820 *dev = PRIV(ndev);
	unsigned i;
	unsigned long flags = 0;

	if (unlikely(nr_rx_empty(dev) <= 2))
		return 0;

	dprintk("rx_refill(%p)\n", ndev);
	if (gfp == GFP_ATOMIC)
		spin_lock_irqsave(&dev->rx_info.lock, flags);
	for (i=0; i<NR_RX_DESC; i++) {
		struct sk_buff *skb;
		long res;

		/* extra 16 bytes for alignment */
		skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp);
		if (unlikely(!skb))
			break;

		/* align skb->data to a 16 byte boundary */
		skb_reserve(skb, skb->data - PTR_ALIGN(skb->data, 16));
		if (gfp != GFP_ATOMIC)
			spin_lock_irqsave(&dev->rx_info.lock, flags);
		res = ns83820_add_rx_skb(dev, skb);
		if (gfp != GFP_ATOMIC)
			spin_unlock_irqrestore(&dev->rx_info.lock, flags);
		if (res) {
			/* chain full: force a "success" return below */
			i = 1;
			break;
		}
	}
	if (gfp == GFP_ATOMIC)
		spin_unlock_irqrestore(&dev->rx_info.lock, flags);

	return i ? 0 : -ENOMEM;
}

/* Refill from interrupt/tasklet context. */
static void rx_refill_atomic(struct net_device *ndev)
{
	rx_refill(ndev, GFP_ATOMIC);
}

/* REFILL */
/* Workqueue handler: sleepable refill, then restart the receiver if it
 * is still up. */
static inline void queue_refill(struct work_struct *work)
{
	struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
	struct net_device *ndev = dev->ndev;

	rx_refill(ndev, GFP_KERNEL);
	if (dev->rx_info.up)
		kick_rx(ndev);
}

/* Reset descriptor i to an empty, hardware-owned state. */
static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
{
	build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0);
}

/* React to a PHY/link-status interrupt: work out speed and duplex (from
 * the TBI registers for fibre, from CFG for copper), program TXCFG/RXCFG
 * to match, and start/stop the tx queue on link transitions. */
static void phy_intr(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
	u32 cfg, new_cfg;
	u32 tbisr, tanar, tanlpar;
	int speed, fullduplex, newlinkstate;

	/* SPDSTS_POLARITY xors the status bits into a canonical polarity */
	cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;

	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have an optical transceiver */
		tbisr = readl(dev->base + TBISR);
		tanar = readl(dev->base + TANAR);
		tanlpar = readl(dev->base + TANLPAR);
		dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
			tbisr, tanar, tanlpar);

		if ( (fullduplex = (tanlpar & TANAR_FULL_DUP) &&
		      (tanar & TANAR_FULL_DUP)) ) {

			/* both of us are full duplex */
			writel(readl(dev->base + TXCFG)
			       | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Light up full duplex LED */
			writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
			       dev->base + GPIOR);

		} else if (((tanlpar & TANAR_HALF_DUP) &&
			    (tanar & TANAR_HALF_DUP)) ||
			   ((tanlpar & TANAR_FULL_DUP) &&
			    (tanar & TANAR_HALF_DUP)) ||
			   ((tanlpar & TANAR_HALF_DUP) &&
			    (tanar & TANAR_FULL_DUP))) {

			/* one or both of us are half duplex */
			writel((readl(dev->base + TXCFG)
				& ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP,
			       dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD,
			       dev->base + RXCFG);
			/* Turn off full duplex LED */
			writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT,
			       dev->base + GPIOR);
		}

		speed = 4; /* 1000F */

	} else {
		/* we have a copper transceiver */
		new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS);

		if (cfg & CFG_SPDSTS1)
			new_cfg |= CFG_MODE_1000;
		else
			new_cfg &= ~CFG_MODE_1000;

		/* divide shifts the 2-bit speed field down to bits 1:0 */
		speed = ((cfg / CFG_SPDSTS0) & 3);
		fullduplex = (cfg & CFG_DUPSTS);

		if (fullduplex) {
			new_cfg |= CFG_SB;
			writel(readl(dev->base + TXCFG)
					| TXCFG_CSI | TXCFG_HBI,
					dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
					dev->base + RXCFG);
		} else {
			writel(readl(dev->base + TXCFG)
					& ~(TXCFG_CSI | TXCFG_HBI),
					dev->base + TXCFG);
			writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD),
					dev->base + RXCFG);
		}

		/* only rewrite CFG while the link is up and it changed */
		if ((cfg & CFG_LNKSTS) &&
		    ((new_cfg ^ dev->CFG_cache) != 0)) {
			writel(new_cfg, dev->base + CFG);
			dev->CFG_cache = new_cfg;
		}

		dev->CFG_cache &= ~CFG_SPDSTS;
		dev->CFG_cache |= cfg & CFG_SPDSTS;
	}

	newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;

	if (newlinkstate & LINK_UP &&
	    dev->linkstate != newlinkstate) {
		netif_start_queue(ndev);
		netif_wake_queue(ndev);
		printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
			ndev->name,
			speeds[speed],
			fullduplex ? "full" : "half");
	} else if (newlinkstate & LINK_DOWN &&
		   dev->linkstate != newlinkstate) {
		netif_stop_queue(ndev);
		printk(KERN_INFO "%s: link now down.\n", ndev->name);
	}

	dev->linkstate = newlinkstate;
}

/* Bring up the receive side: reset the chain, post buffers, program the
 * receive filter, and enable the rx-related interrupts. */
static int ns83820_setup_rx(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	unsigned i;
	int ret;

	dprintk("ns83820_setup_rx(%p)\n", ndev);

	dev->rx_info.idle = 1;
	dev->rx_info.next_rx = 0;
	dev->rx_info.next_rx_desc = dev->rx_info.descs;
	dev->rx_info.next_empty = 0;

	for (i=0; i<NR_RX_DESC; i++)
		clear_rx_desc(dev, i);

	writel(0, dev->base + RXDP_HI);
	writel(dev->rx_info.phy_descs, dev->base + RXDP);

	ret = rx_refill(ndev, GFP_KERNEL);
	if (!ret) {
		dprintk("starting receiver\n");
		/* prevent the interrupt handler from stomping on us */
		spin_lock_irq(&dev->rx_info.lock);

		writel(0x0001, dev->base + CCSR);
		writel(0, dev->base + RFCR);
		writel(0x7fc00000, dev->base + RFCR);
		writel(0xffc00000, dev->base + RFCR);

		dev->rx_info.up = 1;

		phy_intr(ndev);

		/* Okay, let it rip */
		spin_lock_irq(&dev->misc_lock);
		dev->IMR_cache |= ISR_PHY;
		dev->IMR_cache |= ISR_RXRCMP;
		//dev->IMR_cache |= ISR_RXERR;
		//dev->IMR_cache |= ISR_RXOK;
		dev->IMR_cache |= ISR_RXORN;
		dev->IMR_cache |= ISR_RXSOVR;
		dev->IMR_cache |= ISR_RXDESC;
		dev->IMR_cache |= ISR_RXIDLE;
		dev->IMR_cache |= ISR_TXDESC;
		dev->IMR_cache |= ISR_TXIDLE;

		writel(dev->IMR_cache, dev->base + IMR);
		writel(1, dev->base + IER);
spin_unlock(&dev->misc_lock); 689 690 kick_rx(ndev); 691 692 spin_unlock_irq(&dev->rx_info.lock); 693 } 694 return ret; 695} 696 697static void ns83820_cleanup_rx(struct ns83820 *dev) 698{ 699 unsigned i; 700 unsigned long flags; 701 702 dprintk("ns83820_cleanup_rx(%p)\n", dev); 703 704 /* disable receive interrupts */ 705 spin_lock_irqsave(&dev->misc_lock, flags); 706 dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE); 707 writel(dev->IMR_cache, dev->base + IMR); 708 spin_unlock_irqrestore(&dev->misc_lock, flags); 709 710 /* synchronize with the interrupt handler and kill it */ 711 dev->rx_info.up = 0; 712 synchronize_irq(dev->pci_dev->irq); 713 714 /* touch the pci bus... */ 715 readl(dev->base + IMR); 716 717 /* assumes the transmitter is already disabled and reset */ 718 writel(0, dev->base + RXDP_HI); 719 writel(0, dev->base + RXDP); 720 721 for (i=0; i<NR_RX_DESC; i++) { 722 struct sk_buff *skb = dev->rx_info.skbs[i]; 723 dev->rx_info.skbs[i] = NULL; 724 clear_rx_desc(dev, i); 725 kfree_skb(skb); 726 } 727} 728 729static void ns83820_rx_kick(struct net_device *ndev) 730{ 731 struct ns83820 *dev = PRIV(ndev); 732 /*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ { 733 if (dev->rx_info.up) { 734 rx_refill_atomic(ndev); 735 kick_rx(ndev); 736 } 737 } 738 739 if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4) 740 schedule_work(&dev->tq_refill); 741 else 742 kick_rx(ndev); 743 if (dev->rx_info.idle) 744 printk(KERN_DEBUG "%s: BAD\n", ndev->name); 745} 746 747/* rx_irq 748 * 749 */ 750static void rx_irq(struct net_device *ndev) 751{ 752 struct ns83820 *dev = PRIV(ndev); 753 struct rx_info *info = &dev->rx_info; 754 unsigned next_rx; 755 int rx_rc, len; 756 u32 cmdsts; 757 __le32 *desc; 758 unsigned long flags; 759 int nr = 0; 760 761 dprintk("rx_irq(%p)\n", ndev); 762 dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n", 763 readl(dev->base + RXDP), 764 (long)(dev->rx_info.phy_descs), 765 (int)dev->rx_info.next_rx, 
766 (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)), 767 (int)dev->rx_info.next_empty, 768 (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty)) 769 ); 770 771 spin_lock_irqsave(&info->lock, flags); 772 if (!info->up) 773 goto out; 774 775 dprintk("walking descs\n"); 776 next_rx = info->next_rx; 777 desc = info->next_rx_desc; 778 while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) && 779 (cmdsts != CMDSTS_OWN)) { 780 struct sk_buff *skb; 781 u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]); 782 dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR); 783 784 dprintk("cmdsts: %08x\n", cmdsts); 785 dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK])); 786 dprintk("extsts: %08x\n", extsts); 787 788 skb = info->skbs[next_rx]; 789 info->skbs[next_rx] = NULL; 790 info->next_rx = (next_rx + 1) % NR_RX_DESC; 791 792 mb(); 793 clear_rx_desc(dev, next_rx); 794 795 pci_unmap_single(dev->pci_dev, bufptr, 796 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 797 len = cmdsts & CMDSTS_LEN_MASK; 798#ifdef NS83820_VLAN_ACCEL_SUPPORT 799 /* NH: As was mentioned below, this chip is kinda 800 * brain dead about vlan tag stripping. Frames 801 * that are 64 bytes with a vlan header appended 802 * like arp frames, or pings, are flagged as Runts 803 * when the tag is stripped and hardware. This 804 * also means that the OK bit in the descriptor 805 * is cleared when the frame comes in so we have 806 * to do a specific length check here to make sure 807 * the frame would have been ok, had we not stripped 808 * the tag. 
809 */ 810 if (likely((CMDSTS_OK & cmdsts) || 811 ((cmdsts & CMDSTS_RUNT) && len >= 56))) { 812#else 813 if (likely(CMDSTS_OK & cmdsts)) { 814#endif 815 skb_put(skb, len); 816 if (unlikely(!skb)) 817 goto netdev_mangle_me_harder_failed; 818 if (cmdsts & CMDSTS_DEST_MULTI) 819 ndev->stats.multicast++; 820 ndev->stats.rx_packets++; 821 ndev->stats.rx_bytes += len; 822 if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) { 823 skb->ip_summed = CHECKSUM_UNNECESSARY; 824 } else { 825 skb->ip_summed = CHECKSUM_NONE; 826 } 827 skb->protocol = eth_type_trans(skb, ndev); 828#ifdef NS83820_VLAN_ACCEL_SUPPORT 829 if(extsts & EXTSTS_VPKT) { 830 unsigned short tag; 831 tag = ntohs(extsts & EXTSTS_VTG_MASK); 832 rx_rc = vlan_hwaccel_rx(skb,dev->vlgrp,tag); 833 } else { 834 rx_rc = netif_rx(skb); 835 } 836#else 837 rx_rc = netif_rx(skb); 838#endif 839 if (NET_RX_DROP == rx_rc) { 840netdev_mangle_me_harder_failed: 841 ndev->stats.rx_dropped++; 842 } 843 } else { 844 kfree_skb(skb); 845 } 846 847 nr++; 848 next_rx = info->next_rx; 849 desc = info->descs + (DESC_SIZE * next_rx); 850 } 851 info->next_rx = next_rx; 852 info->next_rx_desc = info->descs + (DESC_SIZE * next_rx); 853 854out: 855 if (0 && !nr) { 856 Dprintk("dazed: cmdsts_f: %08x\n", cmdsts); 857 } 858 859 spin_unlock_irqrestore(&info->lock, flags); 860} 861 862static void rx_action(unsigned long _dev) 863{ 864 struct net_device *ndev = (void *)_dev; 865 struct ns83820 *dev = PRIV(ndev); 866 rx_irq(ndev); 867 writel(ihr, dev->base + IHR); 868 869 spin_lock_irq(&dev->misc_lock); 870 dev->IMR_cache |= ISR_RXDESC; 871 writel(dev->IMR_cache, dev->base + IMR); 872 spin_unlock_irq(&dev->misc_lock); 873 874 rx_irq(ndev); 875 ns83820_rx_kick(ndev); 876} 877 878/* Packet Transmit code 879 */ 880static inline void kick_tx(struct ns83820 *dev) 881{ 882 dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n", 883 dev, dev->tx_idx, dev->tx_free_idx); 884 writel(CR_TXE, dev->base + CR); 885} 886 887/* No spinlock needed on the transmit irq path 
as the interrupt handler is 888 * serialized. 889 */ 890static void do_tx_done(struct net_device *ndev) 891{ 892 struct ns83820 *dev = PRIV(ndev); 893 u32 cmdsts, tx_done_idx; 894 __le32 *desc; 895 896 dprintk("do_tx_done(%p)\n", ndev); 897 tx_done_idx = dev->tx_done_idx; 898 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); 899 900 dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", 901 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); 902 while ((tx_done_idx != dev->tx_free_idx) && 903 !(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) { 904 struct sk_buff *skb; 905 unsigned len; 906 dma_addr_t addr; 907 908 if (cmdsts & CMDSTS_ERR) 909 ndev->stats.tx_errors++; 910 if (cmdsts & CMDSTS_OK) 911 ndev->stats.tx_packets++; 912 if (cmdsts & CMDSTS_OK) 913 ndev->stats.tx_bytes += cmdsts & 0xffff; 914 915 dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", 916 tx_done_idx, dev->tx_free_idx, cmdsts); 917 skb = dev->tx_skbs[tx_done_idx]; 918 dev->tx_skbs[tx_done_idx] = NULL; 919 dprintk("done(%p)\n", skb); 920 921 len = cmdsts & CMDSTS_LEN_MASK; 922 addr = desc_addr_get(desc + DESC_BUFPTR); 923 if (skb) { 924 pci_unmap_single(dev->pci_dev, 925 addr, 926 len, 927 PCI_DMA_TODEVICE); 928 dev_kfree_skb_irq(skb); 929 atomic_dec(&dev->nr_tx_skbs); 930 } else 931 pci_unmap_page(dev->pci_dev, 932 addr, 933 len, 934 PCI_DMA_TODEVICE); 935 936 tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC; 937 dev->tx_done_idx = tx_done_idx; 938 desc[DESC_CMDSTS] = cpu_to_le32(0); 939 mb(); 940 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); 941 } 942 943 /* Allow network stack to resume queueing packets after we've 944 * finished transmitting at least 1/4 of the packets in the queue. 
945 */ 946 if (netif_queue_stopped(ndev) && start_tx_okay(dev)) { 947 dprintk("start_queue(%p)\n", ndev); 948 netif_start_queue(ndev); 949 netif_wake_queue(ndev); 950 } 951} 952 953static void ns83820_cleanup_tx(struct ns83820 *dev) 954{ 955 unsigned i; 956 957 for (i=0; i<NR_TX_DESC; i++) { 958 struct sk_buff *skb = dev->tx_skbs[i]; 959 dev->tx_skbs[i] = NULL; 960 if (skb) { 961 __le32 *desc = dev->tx_descs + (i * DESC_SIZE); 962 pci_unmap_single(dev->pci_dev, 963 desc_addr_get(desc + DESC_BUFPTR), 964 le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK, 965 PCI_DMA_TODEVICE); 966 dev_kfree_skb_irq(skb); 967 atomic_dec(&dev->nr_tx_skbs); 968 } 969 } 970 971 memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4); 972} 973 974/* transmit routine. This code relies on the network layer serializing 975 * its calls in, but will run happily in parallel with the interrupt 976 * handler. This code currently has provisions for fragmenting tx buffers 977 * while trying to track down a bug in either the zero copy code or 978 * the tx fifo (hence the MAX_FRAG_LEN). 
 */
/* Queue one skb for transmission.  Builds a chain of descriptors (one
 * for the linear part plus one per page fragment), then sets the OWN bit
 * on the first descriptor last so the chip never sees a half-built
 * chain.  Runs lockless against do_tx_done() except for the short
 * tx_lock section publishing the new free index. */
static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 free_idx, cmdsts, extsts;
	int nr_free, nr_frags;
	unsigned tx_done_idx, last_idx;
	dma_addr_t buf;
	unsigned len;
	skb_frag_t *frag;
	int stopped = 0;
	int do_intr = 0;
	volatile __le32 *first_desc;

	dprintk("ns83820_hard_start_xmit\n");

	nr_frags = skb_shinfo(skb)->nr_frags;
again:
	/* CFG_LNKSTS set (post-polarity) means the link is down */
	if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
		netif_stop_queue(ndev);
		if (unlikely(dev->CFG_cache & CFG_LNKSTS))
			return NETDEV_TX_BUSY;
		netif_start_queue(ndev);
	}

	last_idx = free_idx = dev->tx_free_idx;
	tx_done_idx = dev->tx_done_idx;
	nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC;
	nr_free -= 1;
	if (nr_free <= nr_frags) {
		dprintk("stop_queue - not enough(%p)\n", ndev);
		netif_stop_queue(ndev);

		/* Check again: we may have raced with a tx done irq */
		if (dev->tx_done_idx != tx_done_idx) {
			dprintk("restart queue(%p)\n", ndev);
			netif_start_queue(ndev);
			goto again;
		}
		return NETDEV_TX_BUSY;
	}

	/* request a completion interrupt roughly every NR_TX_DESC/4 slots */
	if (free_idx == dev->tx_intr_idx) {
		do_intr = 1;
		dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC;
	}

	nr_free -= nr_frags;
	if (nr_free < MIN_TX_DESC_FREE) {
		dprintk("stop_queue - last entry(%p)\n", ndev);
		netif_stop_queue(ndev);
		stopped = 1;
	}

	frag = skb_shinfo(skb)->frags;
	if (!nr_frags)
		frag = NULL;
	extsts = 0;
	/* ask the hardware to insert the checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		extsts |= EXTSTS_IPPKT;
		if (IPPROTO_TCP == ip_hdr(skb)->protocol)
			extsts |= EXTSTS_TCPPKT;
		else if (IPPROTO_UDP == ip_hdr(skb)->protocol)
			extsts |= EXTSTS_UDPPKT;
	}

#ifdef NS83820_VLAN_ACCEL_SUPPORT
	if(vlan_tx_tag_present(skb)) {
		/* fetch the vlan tag info out of the
		 * ancilliary data if the vlan code
		 * is using hw vlan acceleration
		 */
		short tag = vlan_tx_tag_get(skb);
		extsts |= (EXTSTS_VPKT | htons(tag));
	}
#endif

	/* linear part first; fragments follow in the loop below */
	len = skb->len;
	if (nr_frags)
		len -= skb->data_len;
	buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	first_desc = dev->tx_descs + (free_idx * DESC_SIZE);

	for (;;) {
		volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);

		dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
			(unsigned long long)buf);
		last_idx = free_idx;
		free_idx = (free_idx + 1) % NR_TX_DESC;
		desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4));
		desc_addr_set(desc + DESC_BUFPTR, buf);
		desc[DESC_EXTSTS] = cpu_to_le32(extsts);

		/* the first descriptor gets OWN set last, after the loop */
		cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
		cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
		cmdsts |= len;
		desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);

		if (!nr_frags)
			break;

		buf = pci_map_page(dev->pci_dev, frag->page,
				   frag->page_offset,
				   frag->size, PCI_DMA_TODEVICE);
		dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
			(long long)buf, (long) page_to_pfn(frag->page),
			frag->page_offset);
		len = frag->size;
		frag++;
		nr_frags--;
	}
	dprintk("done pkt\n");

	spin_lock_irq(&dev->tx_lock);
	/* skb is stored on the LAST fragment's slot; do_tx_done relies
	 * on the intermediate slots holding NULL */
	dev->tx_skbs[last_idx] = skb;
	/* hand the whole chain to the hardware */
	first_desc[DESC_CMDSTS] |= cpu_to_le32(CMDSTS_OWN);
	dev->tx_free_idx = free_idx;
	atomic_inc(&dev->nr_tx_skbs);
	spin_unlock_irq(&dev->tx_lock);

	kick_tx(dev);

	/* Check again: we may have raced with a tx done irq */
	if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
		netif_start_queue(ndev);

	return NETDEV_TX_OK;
}

/* Fold the chip's MIB counters into the net_device statistics.  Caller
 * must hold misc_lock. */
static void ns83820_update_stats(struct ns83820 *dev)
{
	struct net_device *ndev = dev->ndev;
	u8 __iomem *base = dev->base;

	/* the DP83820 will freeze counters, so we need to read all of them */
	ndev->stats.rx_errors		+= readl(base + 0x60) & 0xffff;
	ndev->stats.rx_crc_errors	+= readl(base + 0x64) & 0xffff;
	ndev->stats.rx_missed_errors	+= readl(base + 0x68) & 0xffff;
	ndev->stats.rx_frame_errors	+= readl(base + 0x6c) & 0xffff;
	/*ndev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
	ndev->stats.rx_length_errors	+= readl(base + 0x74) & 0xffff;
	ndev->stats.rx_length_errors	+= readl(base + 0x78) & 0xffff;
	/*ndev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
	/*ndev->stats.rx_pause_count += */ readl(base + 0x80);
	/*ndev->stats.tx_pause_count += */ readl(base + 0x84);
	ndev->stats.tx_carrier_errors	+= readl(base + 0x88) & 0xff;
}

/* .ndo_get_stats: refresh from the hardware MIB counters and return the
 * accumulated statistics. */
static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
{
	struct ns83820 *dev = PRIV(ndev);

	/* somewhat overkill */
	spin_lock_irq(&dev->misc_lock);
	ns83820_update_stats(dev);
	spin_unlock_irq(&dev->misc_lock);

	return &ndev->stats;
}

/* Let ethtool retrieve info */
static int ns83820_get_settings(struct net_device *ndev,
				struct ethtool_cmd *cmd)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 cfg, tanar, tbicr;
	int have_optical = 0;
	int fullduplex   = 0;

	/*
	 * Here's the list of available ethtool commands from other drivers:
	 * cmd->advertising =
	 * cmd->speed =
	 * cmd->duplex =
	 * cmd->port = 0;
	 * cmd->phy_address =
	 * cmd->transceiver = 0;
	 * cmd->autoneg =
	 * cmd->maxtxpkt = 0;
	 * cmd->maxrxpkt = 0;
	 */

	/* read current configuration */
	cfg   = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
	tanar = readl(dev->base + TANAR);
	tbicr = readl(dev->base + TBICR);

	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have an optical interface */
		have_optical = 1;
		fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;

	} else {
		/* We have copper */
		fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
	}

	cmd->supported = SUPPORTED_Autoneg;

	/* we have optical interface */
	if (dev->CFG_cache & CFG_TBI_EN) {
		cmd->supported |= SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full |
					SUPPORTED_FIBRE;
		cmd->port       = PORT_FIBRE;
	} /* TODO: else copper related  support */

	cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
	/* divide shifts the 2-bit speed status field down to bits 1:0 */
	switch (cfg / CFG_SPDSTS0 & 3) {
	case 2:
		cmd->speed = SPEED_1000;
		break;
	case 1:
		cmd->speed = SPEED_100;
		break;
	default:
		cmd->speed = SPEED_10;
		break;
	}
	cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1 : 0;
	return 0;
}

/* Let ethool change settings*/
static int ns83820_set_settings(struct net_device *ndev,
				struct ethtool_cmd *cmd)
{
	struct ns83820 *dev = PRIV(ndev);
	u32 cfg, tanar;
	int have_optical = 0;
	int fullduplex   = 0;

	/* read current configuration */
	cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
	tanar = readl(dev->base + TANAR);

	if (dev->CFG_cache & CFG_TBI_EN) {
		/* we have optical */
		have_optical = 1;
		fullduplex   = (tanar & TANAR_FULL_DUP);

	} else {
		/* we have copper */
		fullduplex = cfg & CFG_DUPSTS;
	}

	spin_lock_irq(&dev->misc_lock);
	spin_lock(&dev->tx_lock);

	/* Set duplex */
	if (cmd->duplex != fullduplex) {
		if (have_optical) {
			/*set full duplex*/
			if (cmd->duplex == DUPLEX_FULL) {
				/* force full duplex */
				writel(readl(dev->base + TXCFG)
					| TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
					dev->base + TXCFG);
				writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
					dev->base + RXCFG);
				/* Light up full duplex LED */
				writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
					dev->base + GPIOR);
			} else {
				/*TODO: set half duplex */
			}

		} else {
1250 /*we have copper*/ 1251 /* TODO: Set duplex for copper cards */ 1252 } 1253 printk(KERN_INFO "%s: Duplex set via ethtool\n", 1254 ndev->name); 1255 } 1256 1257 /* Set autonegotiation */ 1258 if (1) { 1259 if (cmd->autoneg == AUTONEG_ENABLE) { 1260 /* restart auto negotiation */ 1261 writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, 1262 dev->base + TBICR); 1263 writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); 1264 dev->linkstate = LINK_AUTONEGOTIATE; 1265 1266 printk(KERN_INFO "%s: autoneg enabled via ethtool\n", 1267 ndev->name); 1268 } else { 1269 /* disable auto negotiation */ 1270 writel(0x00000000, dev->base + TBICR); 1271 } 1272 1273 printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name, 1274 cmd->autoneg ? "ENABLED" : "DISABLED"); 1275 } 1276 1277 phy_intr(ndev); 1278 spin_unlock(&dev->tx_lock); 1279 spin_unlock_irq(&dev->misc_lock); 1280 1281 return 0; 1282} 1283/* end ethtool get/set support -df */ 1284 1285static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) 1286{ 1287 struct ns83820 *dev = PRIV(ndev); 1288 strcpy(info->driver, "ns83820"); 1289 strcpy(info->version, VERSION); 1290 strcpy(info->bus_info, pci_name(dev->pci_dev)); 1291} 1292 1293static u32 ns83820_get_link(struct net_device *ndev) 1294{ 1295 struct ns83820 *dev = PRIV(ndev); 1296 u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; 1297 return cfg & CFG_LNKSTS ? 
1 : 0; 1298} 1299 1300static const struct ethtool_ops ops = { 1301 .get_settings = ns83820_get_settings, 1302 .set_settings = ns83820_set_settings, 1303 .get_drvinfo = ns83820_get_drvinfo, 1304 .get_link = ns83820_get_link 1305}; 1306 1307/* this function is called in irq context from the ISR */ 1308static void ns83820_mib_isr(struct ns83820 *dev) 1309{ 1310 unsigned long flags; 1311 spin_lock_irqsave(&dev->misc_lock, flags); 1312 ns83820_update_stats(dev); 1313 spin_unlock_irqrestore(&dev->misc_lock, flags); 1314} 1315 1316static void ns83820_do_isr(struct net_device *ndev, u32 isr); 1317static irqreturn_t ns83820_irq(int foo, void *data) 1318{ 1319 struct net_device *ndev = data; 1320 struct ns83820 *dev = PRIV(ndev); 1321 u32 isr; 1322 dprintk("ns83820_irq(%p)\n", ndev); 1323 1324 dev->ihr = 0; 1325 1326 isr = readl(dev->base + ISR); 1327 dprintk("irq: %08x\n", isr); 1328 ns83820_do_isr(ndev, isr); 1329 return IRQ_HANDLED; 1330} 1331 1332static void ns83820_do_isr(struct net_device *ndev, u32 isr) 1333{ 1334 struct ns83820 *dev = PRIV(ndev); 1335 unsigned long flags; 1336 1337#ifdef DEBUG 1338 if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC)) 1339 Dprintk("odd isr? 
0x%08x\n", isr); 1340#endif 1341 1342 if (ISR_RXIDLE & isr) { 1343 dev->rx_info.idle = 1; 1344 Dprintk("oh dear, we are idle\n"); 1345 ns83820_rx_kick(ndev); 1346 } 1347 1348 if ((ISR_RXDESC | ISR_RXOK) & isr) { 1349 prefetch(dev->rx_info.next_rx_desc); 1350 1351 spin_lock_irqsave(&dev->misc_lock, flags); 1352 dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK); 1353 writel(dev->IMR_cache, dev->base + IMR); 1354 spin_unlock_irqrestore(&dev->misc_lock, flags); 1355 1356 tasklet_schedule(&dev->rx_tasklet); 1357 //rx_irq(ndev); 1358 //writel(4, dev->base + IHR); 1359 } 1360 1361 if ((ISR_RXIDLE | ISR_RXORN | ISR_RXDESC | ISR_RXOK | ISR_RXERR) & isr) 1362 ns83820_rx_kick(ndev); 1363 1364 if (unlikely(ISR_RXSOVR & isr)) { 1365 //printk("overrun: rxsovr\n"); 1366 ndev->stats.rx_fifo_errors++; 1367 } 1368 1369 if (unlikely(ISR_RXORN & isr)) { 1370 //printk("overrun: rxorn\n"); 1371 ndev->stats.rx_fifo_errors++; 1372 } 1373 1374 if ((ISR_RXRCMP & isr) && dev->rx_info.up) 1375 writel(CR_RXE, dev->base + CR); 1376 1377 if (ISR_TXIDLE & isr) { 1378 u32 txdp; 1379 txdp = readl(dev->base + TXDP); 1380 dprintk("txdp: %08x\n", txdp); 1381 txdp -= dev->tx_phy_descs; 1382 dev->tx_idx = txdp / (DESC_SIZE * 4); 1383 if (dev->tx_idx >= NR_TX_DESC) { 1384 printk(KERN_ALERT "%s: BUG -- txdp out of range\n", ndev->name); 1385 dev->tx_idx = 0; 1386 } 1387 /* The may have been a race between a pci originated read 1388 * and the descriptor update from the cpu. Just in case, 1389 * kick the transmitter if the hardware thinks it is on a 1390 * different descriptor than we are. 
1391 */ 1392 if (dev->tx_idx != dev->tx_free_idx) 1393 kick_tx(dev); 1394 } 1395 1396 /* Defer tx ring processing until more than a minimum amount of 1397 * work has accumulated 1398 */ 1399 if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) { 1400 spin_lock_irqsave(&dev->tx_lock, flags); 1401 do_tx_done(ndev); 1402 spin_unlock_irqrestore(&dev->tx_lock, flags); 1403 1404 /* Disable TxOk if there are no outstanding tx packets. 1405 */ 1406 if ((dev->tx_done_idx == dev->tx_free_idx) && 1407 (dev->IMR_cache & ISR_TXOK)) { 1408 spin_lock_irqsave(&dev->misc_lock, flags); 1409 dev->IMR_cache &= ~ISR_TXOK; 1410 writel(dev->IMR_cache, dev->base + IMR); 1411 spin_unlock_irqrestore(&dev->misc_lock, flags); 1412 } 1413 } 1414 1415 /* The TxIdle interrupt can come in before the transmit has 1416 * completed. Normally we reap packets off of the combination 1417 * of TxDesc and TxIdle and leave TxOk disabled (since it 1418 * occurs on every packet), but when no further irqs of this 1419 * nature are expected, we must enable TxOk. 
1420 */ 1421 if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) { 1422 spin_lock_irqsave(&dev->misc_lock, flags); 1423 dev->IMR_cache |= ISR_TXOK; 1424 writel(dev->IMR_cache, dev->base + IMR); 1425 spin_unlock_irqrestore(&dev->misc_lock, flags); 1426 } 1427 1428 /* MIB interrupt: one of the statistics counters is about to overflow */ 1429 if (unlikely(ISR_MIB & isr)) 1430 ns83820_mib_isr(dev); 1431 1432 /* PHY: Link up/down/negotiation state change */ 1433 if (unlikely(ISR_PHY & isr)) 1434 phy_intr(ndev); 1435 1436} 1437 1438static void ns83820_do_reset(struct ns83820 *dev, u32 which) 1439{ 1440 Dprintk("resetting chip...\n"); 1441 writel(which, dev->base + CR); 1442 do { 1443 schedule(); 1444 } while (readl(dev->base + CR) & which); 1445 Dprintk("okay!\n"); 1446} 1447 1448static int ns83820_stop(struct net_device *ndev) 1449{ 1450 struct ns83820 *dev = PRIV(ndev); 1451 1452 del_timer_sync(&dev->tx_watchdog); 1453 1454 /* disable interrupts */ 1455 writel(0, dev->base + IMR); 1456 writel(0, dev->base + IER); 1457 readl(dev->base + IER); 1458 1459 dev->rx_info.up = 0; 1460 synchronize_irq(dev->pci_dev->irq); 1461 1462 ns83820_do_reset(dev, CR_RST); 1463 1464 synchronize_irq(dev->pci_dev->irq); 1465 1466 spin_lock_irq(&dev->misc_lock); 1467 dev->IMR_cache &= ~(ISR_TXURN | ISR_TXIDLE | ISR_TXERR | ISR_TXDESC | ISR_TXOK); 1468 spin_unlock_irq(&dev->misc_lock); 1469 1470 ns83820_cleanup_rx(dev); 1471 ns83820_cleanup_tx(dev); 1472 1473 return 0; 1474} 1475 1476static void ns83820_tx_timeout(struct net_device *ndev) 1477{ 1478 struct ns83820 *dev = PRIV(ndev); 1479 u32 tx_done_idx; 1480 __le32 *desc; 1481 unsigned long flags; 1482 1483 spin_lock_irqsave(&dev->tx_lock, flags); 1484 1485 tx_done_idx = dev->tx_done_idx; 1486 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); 1487 1488 printk(KERN_INFO "%s: tx_timeout: tx_done_idx=%d free_idx=%d cmdsts=%08x\n", 1489 ndev->name, 1490 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); 1491 1492#if 
defined(DEBUG) 1493 { 1494 u32 isr; 1495 isr = readl(dev->base + ISR); 1496 printk("irq: %08x imr: %08x\n", isr, dev->IMR_cache); 1497 ns83820_do_isr(ndev, isr); 1498 } 1499#endif 1500 1501 do_tx_done(ndev); 1502 1503 tx_done_idx = dev->tx_done_idx; 1504 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); 1505 1506 printk(KERN_INFO "%s: after: tx_done_idx=%d free_idx=%d cmdsts=%08x\n", 1507 ndev->name, 1508 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); 1509 1510 spin_unlock_irqrestore(&dev->tx_lock, flags); 1511} 1512 1513static void ns83820_tx_watch(unsigned long data) 1514{ 1515 struct net_device *ndev = (void *)data; 1516 struct ns83820 *dev = PRIV(ndev); 1517 1518#if defined(DEBUG) 1519 printk("ns83820_tx_watch: %u %u %d\n", 1520 dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs) 1521 ); 1522#endif 1523 1524 if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) && 1525 dev->tx_done_idx != dev->tx_free_idx) { 1526 printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n", 1527 ndev->name, 1528 dev->tx_done_idx, dev->tx_free_idx, 1529 atomic_read(&dev->nr_tx_skbs)); 1530 ns83820_tx_timeout(ndev); 1531 } 1532 1533 mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); 1534} 1535 1536static int ns83820_open(struct net_device *ndev) 1537{ 1538 struct ns83820 *dev = PRIV(ndev); 1539 unsigned i; 1540 u32 desc; 1541 int ret; 1542 1543 dprintk("ns83820_open\n"); 1544 1545 writel(0, dev->base + PQCR); 1546 1547 ret = ns83820_setup_rx(ndev); 1548 if (ret) 1549 goto failed; 1550 1551 memset(dev->tx_descs, 0, 4 * NR_TX_DESC * DESC_SIZE); 1552 for (i=0; i<NR_TX_DESC; i++) { 1553 dev->tx_descs[(i * DESC_SIZE) + DESC_LINK] 1554 = cpu_to_le32( 1555 dev->tx_phy_descs 1556 + ((i+1) % NR_TX_DESC) * DESC_SIZE * 4); 1557 } 1558 1559 dev->tx_idx = 0; 1560 dev->tx_done_idx = 0; 1561 desc = dev->tx_phy_descs; 1562 writel(0, dev->base + TXDP_HI); 1563 writel(desc, dev->base + TXDP); 1564 1565 init_timer(&dev->tx_watchdog); 1566 dev->tx_watchdog.data = (unsigned 
long)ndev; 1567 dev->tx_watchdog.function = ns83820_tx_watch; 1568 mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); 1569 1570 netif_start_queue(ndev); 1571 1572 return 0; 1573 1574failed: 1575 ns83820_stop(ndev); 1576 return ret; 1577} 1578 1579static void ns83820_getmac(struct ns83820 *dev, u8 *mac) 1580{ 1581 unsigned i; 1582 for (i=0; i<3; i++) { 1583 u32 data; 1584 1585 /* Read from the perfect match memory: this is loaded by 1586 * the chip from the EEPROM via the EELOAD self test. 1587 */ 1588 writel(i*2, dev->base + RFCR); 1589 data = readl(dev->base + RFDR); 1590 1591 *mac++ = data; 1592 *mac++ = data >> 8; 1593 } 1594} 1595 1596static int ns83820_change_mtu(struct net_device *ndev, int new_mtu) 1597{ 1598 if (new_mtu > RX_BUF_SIZE) 1599 return -EINVAL; 1600 ndev->mtu = new_mtu; 1601 return 0; 1602} 1603 1604static void ns83820_set_multicast(struct net_device *ndev) 1605{ 1606 struct ns83820 *dev = PRIV(ndev); 1607 u8 __iomem *rfcr = dev->base + RFCR; 1608 u32 and_mask = 0xffffffff; 1609 u32 or_mask = 0; 1610 u32 val; 1611 1612 if (ndev->flags & IFF_PROMISC) 1613 or_mask |= RFCR_AAU | RFCR_AAM; 1614 else 1615 and_mask &= ~(RFCR_AAU | RFCR_AAM); 1616 1617 if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev)) 1618 or_mask |= RFCR_AAM; 1619 else 1620 and_mask &= ~RFCR_AAM; 1621 1622 spin_lock_irq(&dev->misc_lock); 1623 val = (readl(rfcr) & and_mask) | or_mask; 1624 /* Ramit : RFCR Write Fix doc says RFEN must be 0 modify other bits */ 1625 writel(val & ~RFCR_RFEN, rfcr); 1626 writel(val, rfcr); 1627 spin_unlock_irq(&dev->misc_lock); 1628} 1629 1630static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enable, u32 done, u32 fail) 1631{ 1632 struct ns83820 *dev = PRIV(ndev); 1633 int timed_out = 0; 1634 unsigned long start; 1635 u32 status; 1636 int loops = 0; 1637 1638 dprintk("%s: start %s\n", ndev->name, name); 1639 1640 start = jiffies; 1641 1642 writel(enable, dev->base + PTSCR); 1643 for (;;) { 1644 loops++; 1645 status = 
readl(dev->base + PTSCR); 1646 if (!(status & enable)) 1647 break; 1648 if (status & done) 1649 break; 1650 if (status & fail) 1651 break; 1652 if (time_after_eq(jiffies, start + HZ)) { 1653 timed_out = 1; 1654 break; 1655 } 1656 schedule_timeout_uninterruptible(1); 1657 } 1658 1659 if (status & fail) 1660 printk(KERN_INFO "%s: %s failed! (0x%08x & 0x%08x)\n", 1661 ndev->name, name, status, fail); 1662 else if (timed_out) 1663 printk(KERN_INFO "%s: run_bist %s timed out! (%08x)\n", 1664 ndev->name, name, status); 1665 1666 dprintk("%s: done %s in %d loops\n", ndev->name, name, loops); 1667} 1668 1669#ifdef PHY_CODE_IS_FINISHED 1670static void ns83820_mii_write_bit(struct ns83820 *dev, int bit) 1671{ 1672 /* drive MDC low */ 1673 dev->MEAR_cache &= ~MEAR_MDC; 1674 writel(dev->MEAR_cache, dev->base + MEAR); 1675 readl(dev->base + MEAR); 1676 1677 /* enable output, set bit */ 1678 dev->MEAR_cache |= MEAR_MDDIR; 1679 if (bit) 1680 dev->MEAR_cache |= MEAR_MDIO; 1681 else 1682 dev->MEAR_cache &= ~MEAR_MDIO; 1683 1684 /* set the output bit */ 1685 writel(dev->MEAR_cache, dev->base + MEAR); 1686 readl(dev->base + MEAR); 1687 1688 /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */ 1689 udelay(1); 1690 1691 /* drive MDC high causing the data bit to be latched */ 1692 dev->MEAR_cache |= MEAR_MDC; 1693 writel(dev->MEAR_cache, dev->base + MEAR); 1694 readl(dev->base + MEAR); 1695 1696 /* Wait again... */ 1697 udelay(1); 1698} 1699 1700static int ns83820_mii_read_bit(struct ns83820 *dev) 1701{ 1702 int bit; 1703 1704 /* drive MDC low, disable output */ 1705 dev->MEAR_cache &= ~MEAR_MDC; 1706 dev->MEAR_cache &= ~MEAR_MDDIR; 1707 writel(dev->MEAR_cache, dev->base + MEAR); 1708 readl(dev->base + MEAR); 1709 1710 /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */ 1711 udelay(1); 1712 1713 /* drive MDC high causing the data bit to be latched */ 1714 bit = (readl(dev->base + MEAR) & MEAR_MDIO) ? 
1 : 0; 1715 dev->MEAR_cache |= MEAR_MDC; 1716 writel(dev->MEAR_cache, dev->base + MEAR); 1717 1718 /* Wait again... */ 1719 udelay(1); 1720 1721 return bit; 1722} 1723 1724static unsigned ns83820_mii_read_reg(struct ns83820 *dev, unsigned phy, unsigned reg) 1725{ 1726 unsigned data = 0; 1727 int i; 1728 1729 /* read some garbage so that we eventually sync up */ 1730 for (i=0; i<64; i++) 1731 ns83820_mii_read_bit(dev); 1732 1733 ns83820_mii_write_bit(dev, 0); /* start */ 1734 ns83820_mii_write_bit(dev, 1); 1735 ns83820_mii_write_bit(dev, 1); /* opcode read */ 1736 ns83820_mii_write_bit(dev, 0); 1737 1738 /* write out the phy address: 5 bits, msb first */ 1739 for (i=0; i<5; i++) 1740 ns83820_mii_write_bit(dev, phy & (0x10 >> i)); 1741 1742 /* write out the register address, 5 bits, msb first */ 1743 for (i=0; i<5; i++) 1744 ns83820_mii_write_bit(dev, reg & (0x10 >> i)); 1745 1746 ns83820_mii_read_bit(dev); /* turn around cycles */ 1747 ns83820_mii_read_bit(dev); 1748 1749 /* read in the register data, 16 bits msb first */ 1750 for (i=0; i<16; i++) { 1751 data <<= 1; 1752 data |= ns83820_mii_read_bit(dev); 1753 } 1754 1755 return data; 1756} 1757 1758static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigned reg, unsigned data) 1759{ 1760 int i; 1761 1762 /* read some garbage so that we eventually sync up */ 1763 for (i=0; i<64; i++) 1764 ns83820_mii_read_bit(dev); 1765 1766 ns83820_mii_write_bit(dev, 0); /* start */ 1767 ns83820_mii_write_bit(dev, 1); 1768 ns83820_mii_write_bit(dev, 0); /* opcode read */ 1769 ns83820_mii_write_bit(dev, 1); 1770 1771 /* write out the phy address: 5 bits, msb first */ 1772 for (i=0; i<5; i++) 1773 ns83820_mii_write_bit(dev, phy & (0x10 >> i)); 1774 1775 /* write out the register address, 5 bits, msb first */ 1776 for (i=0; i<5; i++) 1777 ns83820_mii_write_bit(dev, reg & (0x10 >> i)); 1778 1779 ns83820_mii_read_bit(dev); /* turn around cycles */ 1780 ns83820_mii_read_bit(dev); 1781 1782 /* read in the register 
data, 16 bits msb first */ 1783 for (i=0; i<16; i++) 1784 ns83820_mii_write_bit(dev, (data >> (15 - i)) & 1); 1785 1786 return data; 1787} 1788 1789static void ns83820_probe_phy(struct net_device *ndev) 1790{ 1791 struct ns83820 *dev = PRIV(ndev); 1792 static int first; 1793 int i; 1794#define MII_PHYIDR1 0x02 1795#define MII_PHYIDR2 0x03 1796 1797 first = 1; 1798 1799 for (i=1; i<2; i++) { 1800 int j; 1801 unsigned a, b; 1802 a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1); 1803 b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2); 1804 1805 //printk("%s: phy %d: 0x%04x 0x%04x\n", 1806 // ndev->name, i, a, b); 1807 1808 for (j=0; j<0x16; j+=4) { 1809 dprintk("%s: [0x%02x] %04x %04x %04x %04x\n", 1810 ndev->name, j, 1811 ns83820_mii_read_reg(dev, i, 0 + j), 1812 ns83820_mii_read_reg(dev, i, 1 + j), 1813 ns83820_mii_read_reg(dev, i, 2 + j), 1814 ns83820_mii_read_reg(dev, i, 3 + j) 1815 ); 1816 } 1817 } 1818 { 1819 unsigned a, b; 1820 /* read firmware version: memory addr is 0x8402 and 0x8403 */ 1821 ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); 1822 ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); 1823 a = ns83820_mii_read_reg(dev, 1, 0x1d); 1824 1825 ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); 1826 ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); 1827 b = ns83820_mii_read_reg(dev, 1, 0x1d); 1828 dprintk("version: 0x%04x 0x%04x\n", a, b); 1829 } 1830} 1831#endif 1832 1833static const struct net_device_ops netdev_ops = { 1834 .ndo_open = ns83820_open, 1835 .ndo_stop = ns83820_stop, 1836 .ndo_start_xmit = ns83820_hard_start_xmit, 1837 .ndo_get_stats = ns83820_get_stats, 1838 .ndo_change_mtu = ns83820_change_mtu, 1839 .ndo_set_multicast_list = ns83820_set_multicast, 1840 .ndo_validate_addr = eth_validate_addr, 1841 .ndo_set_mac_address = eth_mac_addr, 1842 .ndo_tx_timeout = ns83820_tx_timeout, 1843#ifdef NS83820_VLAN_ACCEL_SUPPORT 1844 .ndo_vlan_rx_register = ns83820_vlan_rx_register, 1845#endif 1846}; 1847 1848static int __devinit ns83820_init_one(struct pci_dev *pci_dev, 1849 const 
struct pci_device_id *id) 1850{ 1851 struct net_device *ndev; 1852 struct ns83820 *dev; 1853 long addr; 1854 int err; 1855 int using_dac = 0; 1856 1857 /* See if we can set the dma mask early on; failure is fatal. */ 1858 if (sizeof(dma_addr_t) == 8 && 1859 !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { 1860 using_dac = 1; 1861 } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { 1862 using_dac = 0; 1863 } else { 1864 dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n"); 1865 return -ENODEV; 1866 } 1867 1868 ndev = alloc_etherdev(sizeof(struct ns83820)); 1869 dev = PRIV(ndev); 1870 1871 err = -ENOMEM; 1872 if (!dev) 1873 goto out; 1874 1875 dev->ndev = ndev; 1876 1877 spin_lock_init(&dev->rx_info.lock); 1878 spin_lock_init(&dev->tx_lock); 1879 spin_lock_init(&dev->misc_lock); 1880 dev->pci_dev = pci_dev; 1881 1882 SET_NETDEV_DEV(ndev, &pci_dev->dev); 1883 1884 INIT_WORK(&dev->tq_refill, queue_refill); 1885 tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); 1886 1887 err = pci_enable_device(pci_dev); 1888 if (err) { 1889 dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err); 1890 goto out_free; 1891 } 1892 1893 pci_set_master(pci_dev); 1894 addr = pci_resource_start(pci_dev, 1); 1895 dev->base = ioremap_nocache(addr, PAGE_SIZE); 1896 dev->tx_descs = pci_alloc_consistent(pci_dev, 1897 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs); 1898 dev->rx_info.descs = pci_alloc_consistent(pci_dev, 1899 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs); 1900 err = -ENOMEM; 1901 if (!dev->base || !dev->tx_descs || !dev->rx_info.descs) 1902 goto out_disable; 1903 1904 dprintk("%p: %08lx %p: %08lx\n", 1905 dev->tx_descs, (long)dev->tx_phy_descs, 1906 dev->rx_info.descs, (long)dev->rx_info.phy_descs); 1907 1908 /* disable interrupts */ 1909 writel(0, dev->base + IMR); 1910 writel(0, dev->base + IER); 1911 readl(dev->base + IER); 1912 1913 dev->IMR_cache = 0; 1914 1915 err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED, 1916 DRV_NAME, ndev); 1917 
if (err) { 1918 dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n", 1919 pci_dev->irq, err); 1920 goto out_disable; 1921 } 1922 1923 rtnl_lock(); 1924 err = dev_alloc_name(ndev, ndev->name); 1925 if (err < 0) { 1926 dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err); 1927 goto out_free_irq; 1928 } 1929 1930 printk("%s: ns83820.c: 0x22c: %08x, subsystem: %04x:%04x\n", 1931 ndev->name, le32_to_cpu(readl(dev->base + 0x22c)), 1932 pci_dev->subsystem_vendor, pci_dev->subsystem_device); 1933 1934 ndev->netdev_ops = &netdev_ops; 1935 SET_ETHTOOL_OPS(ndev, &ops); 1936 ndev->watchdog_timeo = 5 * HZ; 1937 pci_set_drvdata(pci_dev, ndev); 1938 1939 ns83820_do_reset(dev, CR_RST); 1940 1941 /* Must reset the ram bist before running it */ 1942 writel(PTSCR_RBIST_RST, dev->base + PTSCR); 1943 ns83820_run_bist(ndev, "sram bist", PTSCR_RBIST_EN, 1944 PTSCR_RBIST_DONE, PTSCR_RBIST_FAIL); 1945 ns83820_run_bist(ndev, "eeprom bist", PTSCR_EEBIST_EN, 0, 1946 PTSCR_EEBIST_FAIL); 1947 ns83820_run_bist(ndev, "eeprom load", PTSCR_EELOAD_EN, 0, 0); 1948 1949 /* I love config registers */ 1950 dev->CFG_cache = readl(dev->base + CFG); 1951 1952 if ((dev->CFG_cache & CFG_PCI64_DET)) { 1953 printk(KERN_INFO "%s: detected 64 bit PCI data bus.\n", 1954 ndev->name); 1955 /*dev->CFG_cache |= CFG_DATA64_EN;*/ 1956 if (!(dev->CFG_cache & CFG_DATA64_EN)) 1957 printk(KERN_INFO "%s: EEPROM did not enable 64 bit bus. Disabled.\n", 1958 ndev->name); 1959 } else 1960 dev->CFG_cache &= ~(CFG_DATA64_EN); 1961 1962 dev->CFG_cache &= (CFG_TBI_EN | CFG_MRM_DIS | CFG_MWI_DIS | 1963 CFG_T64ADDR | CFG_DATA64_EN | CFG_EXT_125 | 1964 CFG_M64ADDR); 1965 dev->CFG_cache |= CFG_PINT_DUPSTS | CFG_PINT_LNKSTS | CFG_PINT_SPDSTS | 1966 CFG_EXTSTS_EN | CFG_EXD | CFG_PESEL; 1967 dev->CFG_cache |= CFG_REQALG; 1968 dev->CFG_cache |= CFG_POW; 1969 dev->CFG_cache |= CFG_TMRTEST; 1970 1971 /* When compiled with 64 bit addressing, we must always enable 1972 * the 64 bit descriptor format. 
1973 */ 1974 if (sizeof(dma_addr_t) == 8) 1975 dev->CFG_cache |= CFG_M64ADDR; 1976 if (using_dac) 1977 dev->CFG_cache |= CFG_T64ADDR; 1978 1979 /* Big endian mode does not seem to do what the docs suggest */ 1980 dev->CFG_cache &= ~CFG_BEM; 1981 1982 /* setup optical transceiver if we have one */ 1983 if (dev->CFG_cache & CFG_TBI_EN) { 1984 printk(KERN_INFO "%s: enabling optical transceiver\n", 1985 ndev->name); 1986 writel(readl(dev->base + GPIOR) | 0x3e8, dev->base + GPIOR); 1987 1988 /* setup auto negotiation feature advertisement */ 1989 writel(readl(dev->base + TANAR) 1990 | TANAR_HALF_DUP | TANAR_FULL_DUP, 1991 dev->base + TANAR); 1992 1993 /* start auto negotiation */ 1994 writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, 1995 dev->base + TBICR); 1996 writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); 1997 dev->linkstate = LINK_AUTONEGOTIATE; 1998 1999 dev->CFG_cache |= CFG_MODE_1000; 2000 } 2001 2002 writel(dev->CFG_cache, dev->base + CFG); 2003 dprintk("CFG: %08x\n", dev->CFG_cache); 2004 2005 if (reset_phy) { 2006 printk(KERN_INFO "%s: resetting phy\n", ndev->name); 2007 writel(dev->CFG_cache | CFG_PHY_RST, dev->base + CFG); 2008 msleep(10); 2009 writel(dev->CFG_cache, dev->base + CFG); 2010 } 2011 2012 2013 /* Note! The DMA burst size interacts with packet 2014 * transmission, such that the largest packet that 2015 * can be transmitted is 8192 - FLTH - burst size. 2016 * If only the transmit fifo was larger... 2017 */ 2018 /* Ramit : 1024 DMA is not a good idea, it ends up banging 2019 * some DELL and COMPAQ SMP systems */ 2020 writel(TXCFG_CSI | TXCFG_HBI | TXCFG_ATP | TXCFG_MXDMA512 2021 | ((1600 / 32) * 0x100), 2022 dev->base + TXCFG); 2023 2024 /* Flush the interrupt holdoff timer */ 2025 writel(0x000, dev->base + IHR); 2026 writel(0x100, dev->base + IHR); 2027 writel(0x000, dev->base + IHR); 2028 2029 /* Set Rx to full duplex, don't accept runt, errored, long or length 2030 * range errored packets. Use 512 byte DMA. 
2031 */ 2032 /* Ramit : 1024 DMA is not a good idea, it ends up banging 2033 * some DELL and COMPAQ SMP systems 2034 * Turn on ALP, only we are accpeting Jumbo Packets */ 2035 writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD 2036 | RXCFG_STRIPCRC 2037 //| RXCFG_ALP 2038 | (RXCFG_MXDMA512) | 0, dev->base + RXCFG); 2039 2040 /* Disable priority queueing */ 2041 writel(0, dev->base + PQCR); 2042 2043 /* Enable IP checksum validation and detetion of VLAN headers. 2044 * Note: do not set the reject options as at least the 0x102 2045 * revision of the chip does not properly accept IP fragments 2046 * at least for UDP. 2047 */ 2048 /* Ramit : Be sure to turn on RXCFG_ARP if VLAN's are enabled, since 2049 * the MAC it calculates the packetsize AFTER stripping the VLAN 2050 * header, and if a VLAN Tagged packet of 64 bytes is received (like 2051 * a ping with a VLAN header) then the card, strips the 4 byte VLAN 2052 * tag and then checks the packet size, so if RXCFG_ARP is not enabled, 2053 * it discrards it!. These guys...... 
2054 * also turn on tag stripping if hardware acceleration is enabled 2055 */ 2056#ifdef NS83820_VLAN_ACCEL_SUPPORT 2057#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN) 2058#else 2059#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN) 2060#endif 2061 writel(VRCR_INIT_VALUE, dev->base + VRCR); 2062 2063 /* Enable per-packet TCP/UDP/IP checksumming 2064 * and per packet vlan tag insertion if 2065 * vlan hardware acceleration is enabled 2066 */ 2067#ifdef NS83820_VLAN_ACCEL_SUPPORT 2068#define VTCR_INIT_VALUE (VTCR_PPCHK|VTCR_VPPTI) 2069#else 2070#define VTCR_INIT_VALUE VTCR_PPCHK 2071#endif 2072 writel(VTCR_INIT_VALUE, dev->base + VTCR); 2073 2074 /* Ramit : Enable async and sync pause frames */ 2075 /* writel(0, dev->base + PCR); */ 2076 writel((PCR_PS_MCAST | PCR_PS_DA | PCR_PSEN | PCR_FFLO_4K | 2077 PCR_FFHI_8K | PCR_STLO_4 | PCR_STHI_8 | PCR_PAUSE_CNT), 2078 dev->base + PCR); 2079 2080 /* Disable Wake On Lan */ 2081 writel(0, dev->base + WCSR); 2082 2083 ns83820_getmac(dev, ndev->dev_addr); 2084 2085 /* Yes, we support dumb IP checksum on transmit */ 2086 ndev->features |= NETIF_F_SG; 2087 ndev->features |= NETIF_F_IP_CSUM; 2088 2089#ifdef NS83820_VLAN_ACCEL_SUPPORT 2090 /* We also support hardware vlan acceleration */ 2091 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2092#endif 2093 2094 if (using_dac) { 2095 printk(KERN_INFO "%s: using 64 bit addressing.\n", 2096 ndev->name); 2097 ndev->features |= NETIF_F_HIGHDMA; 2098 } 2099 2100 printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %pM io=0x%08lx irq=%d f=%s\n", 2101 ndev->name, 2102 (unsigned)readl(dev->base + SRR) >> 8, 2103 (unsigned)readl(dev->base + SRR) & 0xff, 2104 ndev->dev_addr, addr, pci_dev->irq, 2105 (ndev->features & NETIF_F_HIGHDMA) ? 
"h,sg" : "sg" 2106 ); 2107 2108#ifdef PHY_CODE_IS_FINISHED 2109 ns83820_probe_phy(ndev); 2110#endif 2111 2112 err = register_netdevice(ndev); 2113 if (err) { 2114 printk(KERN_INFO "ns83820: unable to register netdev: %d\n", err); 2115 goto out_cleanup; 2116 } 2117 rtnl_unlock(); 2118 2119 return 0; 2120 2121out_cleanup: 2122 writel(0, dev->base + IMR); /* paranoia */ 2123 writel(0, dev->base + IER); 2124 readl(dev->base + IER); 2125out_free_irq: 2126 rtnl_unlock(); 2127 free_irq(pci_dev->irq, ndev); 2128out_disable: 2129 if (dev->base) 2130 iounmap(dev->base); 2131 pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs); 2132 pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs); 2133 pci_disable_device(pci_dev); 2134out_free: 2135 free_netdev(ndev); 2136 pci_set_drvdata(pci_dev, NULL); 2137out: 2138 return err; 2139} 2140 2141static void __devexit ns83820_remove_one(struct pci_dev *pci_dev) 2142{ 2143 struct net_device *ndev = pci_get_drvdata(pci_dev); 2144 struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */ 2145 2146 if (!ndev) /* paranoia */ 2147 return; 2148 2149 writel(0, dev->base + IMR); /* paranoia */ 2150 writel(0, dev->base + IER); 2151 readl(dev->base + IER); 2152 2153 unregister_netdev(ndev); 2154 free_irq(dev->pci_dev->irq, ndev); 2155 iounmap(dev->base); 2156 pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC, 2157 dev->tx_descs, dev->tx_phy_descs); 2158 pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC, 2159 dev->rx_info.descs, dev->rx_info.phy_descs); 2160 pci_disable_device(dev->pci_dev); 2161 free_netdev(ndev); 2162 pci_set_drvdata(pci_dev, NULL); 2163} 2164 2165static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = { 2166 { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, }, 2167 { 0, }, 2168}; 2169 2170static struct pci_driver driver = { 2171 .name = "ns83820", 2172 .id_table = ns83820_pci_tbl, 2173 .probe = ns83820_init_one, 2174 
.remove = __devexit_p(ns83820_remove_one), 2175}; 2176 2177 2178static int __init ns83820_init(void) 2179{ 2180 printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n"); 2181 return pci_register_driver(&driver); 2182} 2183 2184static void __exit ns83820_exit(void) 2185{ 2186 pci_unregister_driver(&driver); 2187} 2188 2189MODULE_AUTHOR("Benjamin LaHaise <bcrl@kvack.org>"); 2190MODULE_DESCRIPTION("National Semiconductor DP83820 10/100/1000 driver"); 2191MODULE_LICENSE("GPL"); 2192 2193MODULE_DEVICE_TABLE(pci, ns83820_pci_tbl); 2194 2195module_param(lnksts, int, 0); 2196MODULE_PARM_DESC(lnksts, "Polarity of LNKSTS bit"); 2197 2198module_param(ihr, int, 0); 2199MODULE_PARM_DESC(ihr, "Time in 100 us increments to delay interrupts (range 0-127)"); 2200 2201module_param(reset_phy, int, 0); 2202MODULE_PARM_DESC(reset_phy, "Set to 1 to reset the PHY on startup"); 2203 2204module_init(ns83820_init); 2205module_exit(ns83820_exit); 2206