1/* 2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver 3 4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com> 5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com> 6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com> 7 8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191 9 genuine driver. 10 11 This software may be used and distributed according to the terms of 12 the GNU General Public License (GPL), incorporated herein by reference. 13 Drivers based on or derived from this code fall under the GPL and must 14 retain the authorship, copyright and license notice. This file is not 15 a complete program and may only be used when the entire operating 16 system is licensed under the GPL. 17 18 See the file COPYING in this distribution for more information. 19 20*/ 21 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 24#include <linux/module.h> 25#include <linux/moduleparam.h> 26#include <linux/netdevice.h> 27#include <linux/rtnetlink.h> 28#include <linux/etherdevice.h> 29#include <linux/ethtool.h> 30#include <linux/pci.h> 31#include <linux/mii.h> 32#include <linux/delay.h> 33#include <linux/crc32.h> 34#include <linux/dma-mapping.h> 35#include <linux/slab.h> 36#include <asm/irq.h> 37 38#define PHY_MAX_ADDR 32 39#define PHY_ID_ANY 0x1f 40#define MII_REG_ANY 0x1f 41 42#define DRV_VERSION "1.4" 43#define DRV_NAME "sis190" 44#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION 45 46#define sis190_rx_skb netif_rx 47#define sis190_rx_quota(count, quota) count 48 49#define MAC_ADDR_LEN 6 50 51#define NUM_TX_DESC 64 /* [8..1024] */ 52#define NUM_RX_DESC 64 /* [8..8192] */ 53#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) 54#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) 55#define RX_BUF_SIZE 1536 56#define RX_BUF_MASK 0xfff8 57 58#define SIS190_REGS_SIZE 0x80 59#define SIS190_TX_TIMEOUT (6*HZ) 60#define SIS190_PHY_TIMEOUT (10*HZ) 61#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | 
NETIF_MSG_PROBE | \ 62 NETIF_MSG_LINK | NETIF_MSG_IFUP | \ 63 NETIF_MSG_IFDOWN) 64 65/* Enhanced PHY access register bit definitions */ 66#define EhnMIIread 0x0000 67#define EhnMIIwrite 0x0020 68#define EhnMIIdataShift 16 69#define EhnMIIpmdShift 6 /* 7016 only */ 70#define EhnMIIregShift 11 71#define EhnMIIreq 0x0010 72#define EhnMIInotDone 0x0010 73 74/* Write/read MMIO register */ 75#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg)) 76#define SIS_W16(reg, val) writew ((val), ioaddr + (reg)) 77#define SIS_W32(reg, val) writel ((val), ioaddr + (reg)) 78#define SIS_R8(reg) readb (ioaddr + (reg)) 79#define SIS_R16(reg) readw (ioaddr + (reg)) 80#define SIS_R32(reg) readl (ioaddr + (reg)) 81 82#define SIS_PCI_COMMIT() SIS_R32(IntrControl) 83 84enum sis190_registers { 85 TxControl = 0x00, 86 TxDescStartAddr = 0x04, 87 rsv0 = 0x08, // reserved 88 TxSts = 0x0c, // unused (Control/Status) 89 RxControl = 0x10, 90 RxDescStartAddr = 0x14, 91 rsv1 = 0x18, // reserved 92 RxSts = 0x1c, // unused 93 IntrStatus = 0x20, 94 IntrMask = 0x24, 95 IntrControl = 0x28, 96 IntrTimer = 0x2c, // unused (Interupt Timer) 97 PMControl = 0x30, // unused (Power Mgmt Control/Status) 98 rsv2 = 0x34, // reserved 99 ROMControl = 0x38, 100 ROMInterface = 0x3c, 101 StationControl = 0x40, 102 GMIIControl = 0x44, 103 GIoCR = 0x48, // unused (GMAC IO Compensation) 104 GIoCtrl = 0x4c, // unused (GMAC IO Control) 105 TxMacControl = 0x50, 106 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit) 107 RGDelay = 0x58, // unused (RGMII Tx Internal Delay) 108 rsv3 = 0x5c, // reserved 109 RxMacControl = 0x60, 110 RxMacAddr = 0x62, 111 RxHashTable = 0x68, 112 // Undocumented = 0x6c, 113 RxWolCtrl = 0x70, 114 RxWolData = 0x74, // unused (Rx WOL Data Access) 115 RxMPSControl = 0x78, // unused (Rx MPS Control) 116 rsv4 = 0x7c, // reserved 117}; 118 119enum sis190_register_content { 120 /* IntrStatus */ 121 SoftInt = 0x40000000, // unused 122 Timeup = 0x20000000, // unused 123 PauseFrame = 0x00080000, // unused 124 
MagicPacket = 0x00040000, // unused 125 WakeupFrame = 0x00020000, // unused 126 LinkChange = 0x00010000, 127 RxQEmpty = 0x00000080, 128 RxQInt = 0x00000040, 129 TxQ1Empty = 0x00000020, // unused 130 TxQ1Int = 0x00000010, 131 TxQ0Empty = 0x00000008, // unused 132 TxQ0Int = 0x00000004, 133 RxHalt = 0x00000002, 134 TxHalt = 0x00000001, 135 136 /* {Rx/Tx}CmdBits */ 137 CmdReset = 0x10, 138 CmdRxEnb = 0x08, // unused 139 CmdTxEnb = 0x01, 140 RxBufEmpty = 0x01, // unused 141 142 /* Cfg9346Bits */ 143 Cfg9346_Lock = 0x00, // unused 144 Cfg9346_Unlock = 0xc0, // unused 145 146 /* RxMacControl */ 147 AcceptErr = 0x20, // unused 148 AcceptRunt = 0x10, // unused 149 AcceptBroadcast = 0x0800, 150 AcceptMulticast = 0x0400, 151 AcceptMyPhys = 0x0200, 152 AcceptAllPhys = 0x0100, 153 154 /* RxConfigBits */ 155 RxCfgFIFOShift = 13, 156 RxCfgDMAShift = 8, // 0x1a in RxControl ? 157 158 /* TxConfigBits */ 159 TxInterFrameGapShift = 24, 160 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 161 162 LinkStatus = 0x02, // unused 163 FullDup = 0x01, // unused 164 165 /* TBICSRBit */ 166 TBILinkOK = 0x02000000, // unused 167}; 168 169struct TxDesc { 170 __le32 PSize; 171 __le32 status; 172 __le32 addr; 173 __le32 size; 174}; 175 176struct RxDesc { 177 __le32 PSize; 178 __le32 status; 179 __le32 addr; 180 __le32 size; 181}; 182 183enum _DescStatusBit { 184 /* _Desc.status */ 185 OWNbit = 0x80000000, // RXOWN/TXOWN 186 INTbit = 0x40000000, // RXINT/TXINT 187 CRCbit = 0x00020000, // CRCOFF/CRCEN 188 PADbit = 0x00010000, // PREADD/PADEN 189 /* _Desc.size */ 190 RingEnd = 0x80000000, 191 /* TxDesc.status */ 192 LSEN = 0x08000000, // TSO ? 
-- FR 193 IPCS = 0x04000000, 194 TCPCS = 0x02000000, 195 UDPCS = 0x01000000, 196 BSTEN = 0x00800000, 197 EXTEN = 0x00400000, 198 DEFEN = 0x00200000, 199 BKFEN = 0x00100000, 200 CRSEN = 0x00080000, 201 COLEN = 0x00040000, 202 THOL3 = 0x30000000, 203 THOL2 = 0x20000000, 204 THOL1 = 0x10000000, 205 THOL0 = 0x00000000, 206 207 WND = 0x00080000, 208 TABRT = 0x00040000, 209 FIFO = 0x00020000, 210 LINK = 0x00010000, 211 ColCountMask = 0x0000ffff, 212 /* RxDesc.status */ 213 IPON = 0x20000000, 214 TCPON = 0x10000000, 215 UDPON = 0x08000000, 216 Wakup = 0x00400000, 217 Magic = 0x00200000, 218 Pause = 0x00100000, 219 DEFbit = 0x00200000, 220 BCAST = 0x000c0000, 221 MCAST = 0x00080000, 222 UCAST = 0x00040000, 223 /* RxDesc.PSize */ 224 TAGON = 0x80000000, 225 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR 226 ABORT = 0x00800000, 227 SHORT = 0x00400000, 228 LIMIT = 0x00200000, 229 MIIER = 0x00100000, 230 OVRUN = 0x00080000, 231 NIBON = 0x00040000, 232 COLON = 0x00020000, 233 CRCOK = 0x00010000, 234 RxSizeMask = 0x0000ffff 235 /* 236 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and 237 * provide two (unused with Linux) Tx queues. No publically 238 * available documentation alas. 
239 */ 240}; 241 242enum sis190_eeprom_access_register_bits { 243 EECS = 0x00000001, // unused 244 EECLK = 0x00000002, // unused 245 EEDO = 0x00000008, // unused 246 EEDI = 0x00000004, // unused 247 EEREQ = 0x00000080, 248 EEROP = 0x00000200, 249 EEWOP = 0x00000100 // unused 250}; 251 252/* EEPROM Addresses */ 253enum sis190_eeprom_address { 254 EEPROMSignature = 0x00, 255 EEPROMCLK = 0x01, // unused 256 EEPROMInfo = 0x02, 257 EEPROMMACAddr = 0x03 258}; 259 260enum sis190_feature { 261 F_HAS_RGMII = 1, 262 F_PHY_88E1111 = 2, 263 F_PHY_BCM5461 = 4 264}; 265 266struct sis190_private { 267 void __iomem *mmio_addr; 268 struct pci_dev *pci_dev; 269 struct net_device *dev; 270 spinlock_t lock; 271 u32 rx_buf_sz; 272 u32 cur_rx; 273 u32 cur_tx; 274 u32 dirty_rx; 275 u32 dirty_tx; 276 dma_addr_t rx_dma; 277 dma_addr_t tx_dma; 278 struct RxDesc *RxDescRing; 279 struct TxDesc *TxDescRing; 280 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; 281 struct sk_buff *Tx_skbuff[NUM_TX_DESC]; 282 struct work_struct phy_task; 283 struct timer_list timer; 284 u32 msg_enable; 285 struct mii_if_info mii_if; 286 struct list_head first_phy; 287 u32 features; 288 u32 negotiated_lpa; 289 enum { 290 LNK_OFF, 291 LNK_ON, 292 LNK_AUTONEG, 293 } link_status; 294}; 295 296struct sis190_phy { 297 struct list_head list; 298 int phy_id; 299 u16 id[2]; 300 u16 status; 301 u8 type; 302}; 303 304enum sis190_phy_type { 305 UNKNOWN = 0x00, 306 HOME = 0x01, 307 LAN = 0x02, 308 MIX = 0x03 309}; 310 311static struct mii_chip_info { 312 const char *name; 313 u16 id[2]; 314 unsigned int type; 315 u32 feature; 316} mii_chip_table[] = { 317 { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 }, 318 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 }, 319 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, 320 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, 321 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 }, 322 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 }, 323 { "Realtek PHY 
RTL8201", { 0x0000, 0x8200 }, LAN, 0 }, 324 { NULL, } 325}; 326 327static const struct { 328 const char *name; 329} sis_chip_info[] = { 330 { "SiS 190 PCI Fast Ethernet adapter" }, 331 { "SiS 191 PCI Gigabit Ethernet adapter" }, 332}; 333 334static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = { 335 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, 336 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, 337 { 0, }, 338}; 339 340MODULE_DEVICE_TABLE(pci, sis190_pci_tbl); 341 342static int rx_copybreak = 200; 343 344static struct { 345 u32 msg_enable; 346} debug = { -1 }; 347 348MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver"); 349module_param(rx_copybreak, int, 0); 350MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 351module_param_named(debug, debug.msg_enable, int, 0); 352MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 353MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>"); 354MODULE_VERSION(DRV_VERSION); 355MODULE_LICENSE("GPL"); 356 357static const u32 sis190_intr_mask = 358 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange; 359 360/* 361 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 362 * The chips use a 64 element hash table based on the Ethernet CRC. 
363 */ 364static const int multicast_filter_limit = 32; 365 366static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) 367{ 368 unsigned int i; 369 370 SIS_W32(GMIIControl, ctl); 371 372 msleep(1); 373 374 for (i = 0; i < 100; i++) { 375 if (!(SIS_R32(GMIIControl) & EhnMIInotDone)) 376 break; 377 msleep(1); 378 } 379 380 if (i > 99) 381 pr_err("PHY command failed !\n"); 382} 383 384static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val) 385{ 386 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite | 387 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) | 388 (((u32) val) << EhnMIIdataShift)); 389} 390 391static int mdio_read(void __iomem *ioaddr, int phy_id, int reg) 392{ 393 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread | 394 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift)); 395 396 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift); 397} 398 399static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val) 400{ 401 struct sis190_private *tp = netdev_priv(dev); 402 403 mdio_write(tp->mmio_addr, phy_id, reg, val); 404} 405 406static int __mdio_read(struct net_device *dev, int phy_id, int reg) 407{ 408 struct sis190_private *tp = netdev_priv(dev); 409 410 return mdio_read(tp->mmio_addr, phy_id, reg); 411} 412 413static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg) 414{ 415 mdio_read(ioaddr, phy_id, reg); 416 return mdio_read(ioaddr, phy_id, reg); 417} 418 419static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg) 420{ 421 u16 data = 0xffff; 422 unsigned int i; 423 424 if (!(SIS_R32(ROMControl) & 0x0002)) 425 return 0; 426 427 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10)); 428 429 for (i = 0; i < 200; i++) { 430 if (!(SIS_R32(ROMInterface) & EEREQ)) { 431 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16; 432 break; 433 } 434 msleep(1); 435 } 436 437 return data; 438} 439 440static void sis190_irq_mask_and_ack(void __iomem *ioaddr) 441{ 442 SIS_W32(IntrMask, 0x00); 443 
SIS_W32(IntrStatus, 0xffffffff); 444 SIS_PCI_COMMIT(); 445} 446 447static void sis190_asic_down(void __iomem *ioaddr) 448{ 449 /* Stop the chip's Tx and Rx DMA processes. */ 450 451 SIS_W32(TxControl, 0x1a00); 452 SIS_W32(RxControl, 0x1a00); 453 454 sis190_irq_mask_and_ack(ioaddr); 455} 456 457static void sis190_mark_as_last_descriptor(struct RxDesc *desc) 458{ 459 desc->size |= cpu_to_le32(RingEnd); 460} 461 462static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz) 463{ 464 u32 eor = le32_to_cpu(desc->size) & RingEnd; 465 466 desc->PSize = 0x0; 467 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor); 468 wmb(); 469 desc->status = cpu_to_le32(OWNbit | INTbit); 470} 471 472static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, 473 u32 rx_buf_sz) 474{ 475 desc->addr = cpu_to_le32(mapping); 476 sis190_give_to_asic(desc, rx_buf_sz); 477} 478 479static inline void sis190_make_unusable_by_asic(struct RxDesc *desc) 480{ 481 desc->PSize = 0x0; 482 desc->addr = cpu_to_le32(0xdeadbeef); 483 desc->size &= cpu_to_le32(RingEnd); 484 wmb(); 485 desc->status = 0x0; 486} 487 488static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp, 489 struct RxDesc *desc) 490{ 491 u32 rx_buf_sz = tp->rx_buf_sz; 492 struct sk_buff *skb; 493 dma_addr_t mapping; 494 495 skb = netdev_alloc_skb(tp->dev, rx_buf_sz); 496 if (unlikely(!skb)) 497 goto skb_alloc_failed; 498 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz, 499 PCI_DMA_FROMDEVICE); 500 if (pci_dma_mapping_error(tp->pci_dev, mapping)) 501 goto out; 502 sis190_map_to_asic(desc, mapping, rx_buf_sz); 503 504 return skb; 505 506out: 507 dev_kfree_skb_any(skb); 508skb_alloc_failed: 509 sis190_make_unusable_by_asic(desc); 510 return NULL; 511} 512 513static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, 514 u32 start, u32 end) 515{ 516 u32 cur; 517 518 for (cur = start; cur < end; cur++) { 519 unsigned int i = cur % NUM_RX_DESC; 520 521 if 
(tp->Rx_skbuff[i]) 522 continue; 523 524 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i); 525 526 if (!tp->Rx_skbuff[i]) 527 break; 528 } 529 return cur - start; 530} 531 532static bool sis190_try_rx_copy(struct sis190_private *tp, 533 struct sk_buff **sk_buff, int pkt_size, 534 dma_addr_t addr) 535{ 536 struct sk_buff *skb; 537 bool done = false; 538 539 if (pkt_size >= rx_copybreak) 540 goto out; 541 542 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); 543 if (!skb) 544 goto out; 545 546 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz, 547 PCI_DMA_FROMDEVICE); 548 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); 549 *sk_buff = skb; 550 done = true; 551out: 552 return done; 553} 554 555static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) 556{ 557#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT) 558 559 if ((status & CRCOK) && !(status & ErrMask)) 560 return 0; 561 562 if (!(status & CRCOK)) 563 stats->rx_crc_errors++; 564 else if (status & OVRUN) 565 stats->rx_over_errors++; 566 else if (status & (SHORT | LIMIT)) 567 stats->rx_length_errors++; 568 else if (status & (MIIER | NIBON | COLON)) 569 stats->rx_frame_errors++; 570 571 stats->rx_errors++; 572 return -1; 573} 574 575static int sis190_rx_interrupt(struct net_device *dev, 576 struct sis190_private *tp, void __iomem *ioaddr) 577{ 578 struct net_device_stats *stats = &dev->stats; 579 u32 rx_left, cur_rx = tp->cur_rx; 580 u32 delta, count; 581 582 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; 583 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota); 584 585 for (; rx_left > 0; rx_left--, cur_rx++) { 586 unsigned int entry = cur_rx % NUM_RX_DESC; 587 struct RxDesc *desc = tp->RxDescRing + entry; 588 u32 status; 589 590 if (le32_to_cpu(desc->status) & OWNbit) 591 break; 592 593 status = le32_to_cpu(desc->PSize); 594 595 //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status); 596 597 if (sis190_rx_pkt_err(status, stats) < 0) 
598 sis190_give_to_asic(desc, tp->rx_buf_sz); 599 else { 600 struct sk_buff *skb = tp->Rx_skbuff[entry]; 601 dma_addr_t addr = le32_to_cpu(desc->addr); 602 int pkt_size = (status & RxSizeMask) - 4; 603 struct pci_dev *pdev = tp->pci_dev; 604 605 if (unlikely(pkt_size > tp->rx_buf_sz)) { 606 netif_info(tp, intr, dev, 607 "(frag) status = %08x\n", status); 608 stats->rx_dropped++; 609 stats->rx_length_errors++; 610 sis190_give_to_asic(desc, tp->rx_buf_sz); 611 continue; 612 } 613 614 615 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) { 616 pci_dma_sync_single_for_device(pdev, addr, 617 tp->rx_buf_sz, PCI_DMA_FROMDEVICE); 618 sis190_give_to_asic(desc, tp->rx_buf_sz); 619 } else { 620 pci_unmap_single(pdev, addr, tp->rx_buf_sz, 621 PCI_DMA_FROMDEVICE); 622 tp->Rx_skbuff[entry] = NULL; 623 sis190_make_unusable_by_asic(desc); 624 } 625 626 skb_put(skb, pkt_size); 627 skb->protocol = eth_type_trans(skb, dev); 628 629 sis190_rx_skb(skb); 630 631 stats->rx_packets++; 632 stats->rx_bytes += pkt_size; 633 if ((status & BCAST) == MCAST) 634 stats->multicast++; 635 } 636 } 637 count = cur_rx - tp->cur_rx; 638 tp->cur_rx = cur_rx; 639 640 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); 641 if (!delta && count) 642 netif_info(tp, intr, dev, "no Rx buffer allocated\n"); 643 tp->dirty_rx += delta; 644 645 if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) 646 netif_emerg(tp, intr, dev, "Rx buffers exhausted\n"); 647 648 return count; 649} 650 651static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, 652 struct TxDesc *desc) 653{ 654 unsigned int len; 655 656 len = skb->len < ETH_ZLEN ? 
ETH_ZLEN : skb->len; 657 658 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); 659 660 memset(desc, 0x00, sizeof(*desc)); 661} 662 663static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats) 664{ 665#define TxErrMask (WND | TABRT | FIFO | LINK) 666 667 if (!unlikely(status & TxErrMask)) 668 return 0; 669 670 if (status & WND) 671 stats->tx_window_errors++; 672 if (status & TABRT) 673 stats->tx_aborted_errors++; 674 if (status & FIFO) 675 stats->tx_fifo_errors++; 676 if (status & LINK) 677 stats->tx_carrier_errors++; 678 679 stats->tx_errors++; 680 681 return -1; 682} 683 684static void sis190_tx_interrupt(struct net_device *dev, 685 struct sis190_private *tp, void __iomem *ioaddr) 686{ 687 struct net_device_stats *stats = &dev->stats; 688 u32 pending, dirty_tx = tp->dirty_tx; 689 /* 690 * It would not be needed if queueing was allowed to be enabled 691 * again too early (hint: think preempt and unclocked smp systems). 692 */ 693 unsigned int queue_stopped; 694 695 smp_rmb(); 696 pending = tp->cur_tx - dirty_tx; 697 queue_stopped = (pending == NUM_TX_DESC); 698 699 for (; pending; pending--, dirty_tx++) { 700 unsigned int entry = dirty_tx % NUM_TX_DESC; 701 struct TxDesc *txd = tp->TxDescRing + entry; 702 u32 status = le32_to_cpu(txd->status); 703 struct sk_buff *skb; 704 705 if (status & OWNbit) 706 break; 707 708 skb = tp->Tx_skbuff[entry]; 709 710 if (likely(sis190_tx_pkt_err(status, stats) == 0)) { 711 stats->tx_packets++; 712 stats->tx_bytes += skb->len; 713 stats->collisions += ((status & ColCountMask) - 1); 714 } 715 716 sis190_unmap_tx_skb(tp->pci_dev, skb, txd); 717 tp->Tx_skbuff[entry] = NULL; 718 dev_kfree_skb_irq(skb); 719 } 720 721 if (tp->dirty_tx != dirty_tx) { 722 tp->dirty_tx = dirty_tx; 723 smp_wmb(); 724 if (queue_stopped) 725 netif_wake_queue(dev); 726 } 727} 728 729/* 730 * The interrupt handler does all of the Rx thread work and cleans up after 731 * the Tx thread. 
732 */ 733static irqreturn_t sis190_interrupt(int irq, void *__dev) 734{ 735 struct net_device *dev = __dev; 736 struct sis190_private *tp = netdev_priv(dev); 737 void __iomem *ioaddr = tp->mmio_addr; 738 unsigned int handled = 0; 739 u32 status; 740 741 status = SIS_R32(IntrStatus); 742 743 if ((status == 0xffffffff) || !status) 744 goto out; 745 746 handled = 1; 747 748 if (unlikely(!netif_running(dev))) { 749 sis190_asic_down(ioaddr); 750 goto out; 751 } 752 753 SIS_W32(IntrStatus, status); 754 755// netif_info(tp, intr, dev, "status = %08x\n", status); 756 757 if (status & LinkChange) { 758 netif_info(tp, intr, dev, "link change\n"); 759 del_timer(&tp->timer); 760 schedule_work(&tp->phy_task); 761 } 762 763 if (status & RxQInt) 764 sis190_rx_interrupt(dev, tp, ioaddr); 765 766 if (status & TxQ0Int) 767 sis190_tx_interrupt(dev, tp, ioaddr); 768out: 769 return IRQ_RETVAL(handled); 770} 771 772#ifdef CONFIG_NET_POLL_CONTROLLER 773static void sis190_netpoll(struct net_device *dev) 774{ 775 struct sis190_private *tp = netdev_priv(dev); 776 struct pci_dev *pdev = tp->pci_dev; 777 778 disable_irq(pdev->irq); 779 sis190_interrupt(pdev->irq, dev); 780 enable_irq(pdev->irq); 781} 782#endif 783 784static void sis190_free_rx_skb(struct sis190_private *tp, 785 struct sk_buff **sk_buff, struct RxDesc *desc) 786{ 787 struct pci_dev *pdev = tp->pci_dev; 788 789 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz, 790 PCI_DMA_FROMDEVICE); 791 dev_kfree_skb(*sk_buff); 792 *sk_buff = NULL; 793 sis190_make_unusable_by_asic(desc); 794} 795 796static void sis190_rx_clear(struct sis190_private *tp) 797{ 798 unsigned int i; 799 800 for (i = 0; i < NUM_RX_DESC; i++) { 801 if (!tp->Rx_skbuff[i]) 802 continue; 803 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i); 804 } 805} 806 807static void sis190_init_ring_indexes(struct sis190_private *tp) 808{ 809 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; 810} 811 812static int sis190_init_ring(struct 
net_device *dev) 813{ 814 struct sis190_private *tp = netdev_priv(dev); 815 816 sis190_init_ring_indexes(tp); 817 818 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *)); 819 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); 820 821 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) 822 goto err_rx_clear; 823 824 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1); 825 826 return 0; 827 828err_rx_clear: 829 sis190_rx_clear(tp); 830 return -ENOMEM; 831} 832 833static void sis190_set_rx_mode(struct net_device *dev) 834{ 835 struct sis190_private *tp = netdev_priv(dev); 836 void __iomem *ioaddr = tp->mmio_addr; 837 unsigned long flags; 838 u32 mc_filter[2]; /* Multicast hash filter */ 839 u16 rx_mode; 840 841 if (dev->flags & IFF_PROMISC) { 842 rx_mode = 843 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 844 AcceptAllPhys; 845 mc_filter[1] = mc_filter[0] = 0xffffffff; 846 } else if ((netdev_mc_count(dev) > multicast_filter_limit) || 847 (dev->flags & IFF_ALLMULTI)) { 848 /* Too many to filter perfectly -- accept all multicasts. 
*/ 849 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 850 mc_filter[1] = mc_filter[0] = 0xffffffff; 851 } else { 852 struct netdev_hw_addr *ha; 853 854 rx_mode = AcceptBroadcast | AcceptMyPhys; 855 mc_filter[1] = mc_filter[0] = 0; 856 netdev_for_each_mc_addr(ha, dev) { 857 int bit_nr = 858 ether_crc(ETH_ALEN, ha->addr) & 0x3f; 859 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 860 rx_mode |= AcceptMulticast; 861 } 862 } 863 864 spin_lock_irqsave(&tp->lock, flags); 865 866 SIS_W16(RxMacControl, rx_mode | 0x2); 867 SIS_W32(RxHashTable, mc_filter[0]); 868 SIS_W32(RxHashTable + 4, mc_filter[1]); 869 870 spin_unlock_irqrestore(&tp->lock, flags); 871} 872 873static void sis190_soft_reset(void __iomem *ioaddr) 874{ 875 SIS_W32(IntrControl, 0x8000); 876 SIS_PCI_COMMIT(); 877 SIS_W32(IntrControl, 0x0); 878 sis190_asic_down(ioaddr); 879} 880 881static void sis190_hw_start(struct net_device *dev) 882{ 883 struct sis190_private *tp = netdev_priv(dev); 884 void __iomem *ioaddr = tp->mmio_addr; 885 886 sis190_soft_reset(ioaddr); 887 888 SIS_W32(TxDescStartAddr, tp->tx_dma); 889 SIS_W32(RxDescStartAddr, tp->rx_dma); 890 891 SIS_W32(IntrStatus, 0xffffffff); 892 SIS_W32(IntrMask, 0x0); 893 SIS_W32(GMIIControl, 0x0); 894 SIS_W32(TxMacControl, 0x60); 895 SIS_W16(RxMacControl, 0x02); 896 SIS_W32(RxHashTable, 0x0); 897 SIS_W32(0x6c, 0x0); 898 SIS_W32(RxWolCtrl, 0x0); 899 SIS_W32(RxWolData, 0x0); 900 901 SIS_PCI_COMMIT(); 902 903 sis190_set_rx_mode(dev); 904 905 /* Enable all known interrupts by setting the interrupt mask. 
*/ 906 SIS_W32(IntrMask, sis190_intr_mask); 907 908 SIS_W32(TxControl, 0x1a00 | CmdTxEnb); 909 SIS_W32(RxControl, 0x1a1d); 910 911 netif_start_queue(dev); 912} 913 914static void sis190_phy_task(struct work_struct *work) 915{ 916 struct sis190_private *tp = 917 container_of(work, struct sis190_private, phy_task); 918 struct net_device *dev = tp->dev; 919 void __iomem *ioaddr = tp->mmio_addr; 920 int phy_id = tp->mii_if.phy_id; 921 u16 val; 922 923 rtnl_lock(); 924 925 if (!netif_running(dev)) 926 goto out_unlock; 927 928 val = mdio_read(ioaddr, phy_id, MII_BMCR); 929 if (val & BMCR_RESET) { 930 mod_timer(&tp->timer, jiffies + HZ/10); 931 goto out_unlock; 932 } 933 934 val = mdio_read_latched(ioaddr, phy_id, MII_BMSR); 935 if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) { 936 netif_carrier_off(dev); 937 netif_warn(tp, link, dev, "auto-negotiating...\n"); 938 tp->link_status = LNK_AUTONEG; 939 } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) { 940 /* Rejoice ! 
*/ 941 struct { 942 int val; 943 u32 ctl; 944 const char *msg; 945 } reg31[] = { 946 { LPA_1000FULL, 0x07000c00 | 0x00001000, 947 "1000 Mbps Full Duplex" }, 948 { LPA_1000HALF, 0x07000c00, 949 "1000 Mbps Half Duplex" }, 950 { LPA_100FULL, 0x04000800 | 0x00001000, 951 "100 Mbps Full Duplex" }, 952 { LPA_100HALF, 0x04000800, 953 "100 Mbps Half Duplex" }, 954 { LPA_10FULL, 0x04000400 | 0x00001000, 955 "10 Mbps Full Duplex" }, 956 { LPA_10HALF, 0x04000400, 957 "10 Mbps Half Duplex" }, 958 { 0, 0x04000400, "unknown" } 959 }, *p = NULL; 960 u16 adv, autoexp, gigadv, gigrec; 961 962 val = mdio_read(ioaddr, phy_id, 0x1f); 963 netif_info(tp, link, dev, "mii ext = %04x\n", val); 964 965 val = mdio_read(ioaddr, phy_id, MII_LPA); 966 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE); 967 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION); 968 netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n", 969 val, adv, autoexp); 970 971 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) { 972 /* check for gigabit speed */ 973 gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000); 974 gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000); 975 val = (gigadv & (gigrec >> 2)); 976 if (val & ADVERTISE_1000FULL) 977 p = reg31; 978 else if (val & ADVERTISE_1000HALF) 979 p = reg31 + 1; 980 } 981 if (!p) { 982 val &= adv; 983 984 for (p = reg31; p->val; p++) { 985 if ((val & p->val) == p->val) 986 break; 987 } 988 } 989 990 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00; 991 992 if ((tp->features & F_HAS_RGMII) && 993 (tp->features & F_PHY_BCM5461)) { 994 // Set Tx Delay in RGMII mode. 
995 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7); 996 udelay(200); 997 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00); 998 p->ctl |= 0x03000000; 999 } 1000 1001 SIS_W32(StationControl, p->ctl); 1002 1003 if (tp->features & F_HAS_RGMII) { 1004 SIS_W32(RGDelay, 0x0441); 1005 SIS_W32(RGDelay, 0x0440); 1006 } 1007 1008 tp->negotiated_lpa = p->val; 1009 1010 netif_info(tp, link, dev, "link on %s mode\n", p->msg); 1011 netif_carrier_on(dev); 1012 tp->link_status = LNK_ON; 1013 } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG) 1014 tp->link_status = LNK_OFF; 1015 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); 1016 1017out_unlock: 1018 rtnl_unlock(); 1019} 1020 1021static void sis190_phy_timer(unsigned long __opaque) 1022{ 1023 struct net_device *dev = (struct net_device *)__opaque; 1024 struct sis190_private *tp = netdev_priv(dev); 1025 1026 if (likely(netif_running(dev))) 1027 schedule_work(&tp->phy_task); 1028} 1029 1030static inline void sis190_delete_timer(struct net_device *dev) 1031{ 1032 struct sis190_private *tp = netdev_priv(dev); 1033 1034 del_timer_sync(&tp->timer); 1035} 1036 1037static inline void sis190_request_timer(struct net_device *dev) 1038{ 1039 struct sis190_private *tp = netdev_priv(dev); 1040 struct timer_list *timer = &tp->timer; 1041 1042 init_timer(timer); 1043 timer->expires = jiffies + SIS190_PHY_TIMEOUT; 1044 timer->data = (unsigned long)dev; 1045 timer->function = sis190_phy_timer; 1046 add_timer(timer); 1047} 1048 1049static void sis190_set_rxbufsize(struct sis190_private *tp, 1050 struct net_device *dev) 1051{ 1052 unsigned int mtu = dev->mtu; 1053 1054 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? 
		mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* RxDesc->size has a licence to kill the lower bits */
	if (tp->rx_buf_sz & 0x07) {
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}

/*
 * Open the interface: allocate the DMA-coherent Tx/Rx descriptor rings,
 * populate the Rx ring, arm the PHY poll timer, install the (shared)
 * interrupt handler and start the hardware.  Unwinds in reverse order
 * through the numbered error labels on failure.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}

/*
 * Drop every skb still attached to the Tx ring, unmapping its DMA
 * buffer first.  Each discarded packet is accounted as tx_dropped.
 * Resets the ring indices; callers serialize against the transmitter
 * (queue stopped or tp->lock held).
 */
static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->dev->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}

/*
 * Quiesce the adapter: delete the PHY timer, stop the queue, shut the
 * chip down under the lock and wait for any in-flight interrupt
 * handler.  The loop repeats until IntrMask reads back zero, i.e.
 * nothing re-enabled interrupts behind our back.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}

/* ndo_stop: tear down everything sis190_open() set up. */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}

/*
 * ndo_start_xmit: hand one skb to the hardware.  The descriptor body
 * is fully written before OWNbit is set; wmb() enforces that ordering
 * so the chip never sees a half-built descriptor.
 */
static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	/* Pad runt frames up to the minimum ethernet length. */
	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Descriptor still owned by the chip: the queue should have
	 * been stopped before this could happen. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		netif_err(tp, tx_err, dev,
			  "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
		netif_err(tp, tx_err, dev,
			  "PCI mapping failed, dropping packet");
		return NETDEV_TX_BUSY;
	}

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* Commit the descriptor body before transferring ownership. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
	if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
		/* Half Duplex */
		desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
		if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
			desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
	}

	tp->cur_tx++;

	/* Publish cur_tx before kicking the transmitter. */
	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	/* Ring looks full: stop the queue, then re-check dirty_tx in
	 * case the completion path freed slots concurrently. */
	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}

/* Free every sis190_phy node hanging off @first_phy. */
static void sis190_free_phy(struct list_head *first_phy)
{
	struct sis190_phy *cur, *next;

	list_for_each_entry_safe(cur, next, first_phy, list) {
		kfree(cur);
	}
}

/**
 * sis190_default_phy - Select default PHY for sis190 mac.
 * @dev: the net device to probe for
 *
 * Select first detected PHY with link as default.
 * If no one is link on, select PHY whose types is HOME as default.
 * If HOME doesn't exist, select LAN.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			/* Not chosen: isolate it from the MII bus while
			 * remembering it as a possible fallback. */
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		/* No PHY has link: fall back HOME -> LAN -> first found. */
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_first_entry(&tp->first_phy,
						 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		if (netif_msg_probe(tp))
			pr_info("%s: Using transceiver at address %d as default\n",
				pci_name(tp->pci_dev), mii_if->phy_id);
	}

	/* De-isolate the chosen PHY and return its latched BMSR. */
	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}

/*
 * Record one discovered PHY: read its PHYSID registers and match them
 * against mii_chip_table to classify it (LAN / HOME / unknown).
 */
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	/* Table match ignores the low nibble of PHYSID2 (revision). */
	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		/* MIX parts are classified by whether 100Mb is supported. */
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
		if (netif_msg_probe(tp))
			pr_info("%s: %s transceiver at address %d\n",
				pci_name(tp->pci_dev), p->name, phy_id);
	} else {
		phy->type = UNKNOWN;
		if (netif_msg_probe(tp))
			pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
				pci_name(tp->pci_dev),
				phy->id[0], (phy->id[1] & 0xfff0), phy_id);
	}
}

/*
 * Fixup for Marvell 88E1111 PHYs: program registers 0x1b/0x14 with
 * magic values that differ for RGMII vs GMII attachment.  No-op for
 * other PHYs.
 */
static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
{
	if (tp->features & F_PHY_88E1111) {
		void __iomem *ioaddr = tp->mmio_addr;
		int phy_id = tp->mii_if.phy_id;
		u16 reg[2][2] = {
			{ 0x808b, 0x0ce1 },
			{ 0x808f, 0x0c60 }
		}, *p;

		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];

		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
		udelay(200);
		mdio_write(ioaddr, phy_id, 0x14, p[1]);
		udelay(200);
	}
}

/**
 * sis190_mii_probe - Probe MII PHY for sis190
 * @dev: the net device to probe for
 *
 * Search for total of 32 possible mii phy addresses.
 * Identify and set current phy if found one,
 * return error if it failed to found.
1383 */ 1384static int __devinit sis190_mii_probe(struct net_device *dev) 1385{ 1386 struct sis190_private *tp = netdev_priv(dev); 1387 struct mii_if_info *mii_if = &tp->mii_if; 1388 void __iomem *ioaddr = tp->mmio_addr; 1389 int phy_id; 1390 int rc = 0; 1391 1392 INIT_LIST_HEAD(&tp->first_phy); 1393 1394 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { 1395 struct sis190_phy *phy; 1396 u16 status; 1397 1398 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR); 1399 1400 // Try next mii if the current one is not accessible. 1401 if (status == 0xffff || status == 0x0000) 1402 continue; 1403 1404 phy = kmalloc(sizeof(*phy), GFP_KERNEL); 1405 if (!phy) { 1406 sis190_free_phy(&tp->first_phy); 1407 rc = -ENOMEM; 1408 goto out; 1409 } 1410 1411 sis190_init_phy(dev, tp, phy, phy_id, status); 1412 1413 list_add(&tp->first_phy, &phy->list); 1414 } 1415 1416 if (list_empty(&tp->first_phy)) { 1417 if (netif_msg_probe(tp)) 1418 pr_info("%s: No MII transceivers found!\n", 1419 pci_name(tp->pci_dev)); 1420 rc = -EIO; 1421 goto out; 1422 } 1423 1424 /* Select default PHY for mac */ 1425 sis190_default_phy(dev); 1426 1427 sis190_mii_probe_88e1111_fixup(tp); 1428 1429 mii_if->dev = dev; 1430 mii_if->mdio_read = __mdio_read; 1431 mii_if->mdio_write = __mdio_write; 1432 mii_if->phy_id_mask = PHY_ID_ANY; 1433 mii_if->reg_num_mask = MII_REG_ANY; 1434out: 1435 return rc; 1436} 1437 1438static void sis190_mii_remove(struct net_device *dev) 1439{ 1440 struct sis190_private *tp = netdev_priv(dev); 1441 1442 sis190_free_phy(&tp->first_phy); 1443} 1444 1445static void sis190_release_board(struct pci_dev *pdev) 1446{ 1447 struct net_device *dev = pci_get_drvdata(pdev); 1448 struct sis190_private *tp = netdev_priv(dev); 1449 1450 iounmap(tp->mmio_addr); 1451 pci_release_regions(pdev); 1452 pci_disable_device(pdev); 1453 free_netdev(dev); 1454} 1455 1456static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) 1457{ 1458 struct sis190_private *tp; 1459 struct net_device 
*dev; 1460 void __iomem *ioaddr; 1461 int rc; 1462 1463 dev = alloc_etherdev(sizeof(*tp)); 1464 if (!dev) { 1465 if (netif_msg_drv(&debug)) 1466 pr_err("unable to alloc new ethernet\n"); 1467 rc = -ENOMEM; 1468 goto err_out_0; 1469 } 1470 1471 SET_NETDEV_DEV(dev, &pdev->dev); 1472 1473 tp = netdev_priv(dev); 1474 tp->dev = dev; 1475 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); 1476 1477 rc = pci_enable_device(pdev); 1478 if (rc < 0) { 1479 if (netif_msg_probe(tp)) 1480 pr_err("%s: enable failure\n", pci_name(pdev)); 1481 goto err_free_dev_1; 1482 } 1483 1484 rc = -ENODEV; 1485 1486 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1487 if (netif_msg_probe(tp)) 1488 pr_err("%s: region #0 is no MMIO resource\n", 1489 pci_name(pdev)); 1490 goto err_pci_disable_2; 1491 } 1492 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) { 1493 if (netif_msg_probe(tp)) 1494 pr_err("%s: invalid PCI region size(s)\n", 1495 pci_name(pdev)); 1496 goto err_pci_disable_2; 1497 } 1498 1499 rc = pci_request_regions(pdev, DRV_NAME); 1500 if (rc < 0) { 1501 if (netif_msg_probe(tp)) 1502 pr_err("%s: could not request regions\n", 1503 pci_name(pdev)); 1504 goto err_pci_disable_2; 1505 } 1506 1507 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1508 if (rc < 0) { 1509 if (netif_msg_probe(tp)) 1510 pr_err("%s: DMA configuration failed\n", 1511 pci_name(pdev)); 1512 goto err_free_res_3; 1513 } 1514 1515 pci_set_master(pdev); 1516 1517 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE); 1518 if (!ioaddr) { 1519 if (netif_msg_probe(tp)) 1520 pr_err("%s: cannot remap MMIO, aborting\n", 1521 pci_name(pdev)); 1522 rc = -EIO; 1523 goto err_free_res_3; 1524 } 1525 1526 tp->pci_dev = pdev; 1527 tp->mmio_addr = ioaddr; 1528 tp->link_status = LNK_OFF; 1529 1530 sis190_irq_mask_and_ack(ioaddr); 1531 1532 sis190_soft_reset(ioaddr); 1533out: 1534 return dev; 1535 1536err_free_res_3: 1537 pci_release_regions(pdev); 1538err_pci_disable_2: 1539 pci_disable_device(pdev); 
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}

/*
 * ndo_tx_timeout: the transmitter stalled.  Stop it, drop whatever is
 * pending on the Tx ring and restart the hardware.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
		   SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}

/* Bit 7 of the EEPROM/APC info byte selects RGMII operation. */
static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
{
	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
}

/*
 * Read the MAC address (and the RGMII flag) from the on-board EEPROM.
 * Returns -EIO when the signature word reads back all-ones or zero,
 * i.e. when no sane EEPROM is present.
 */
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		if (netif_msg_probe(tp))
			pr_info("%s: Error EEPROM read %x\n",
				pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM, one 16-bit word at a time. */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}

/**
 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 * @pdev: PCI device
 * @dev: network device to get address for
 *
 * SiS96x model, use APC CMOS RAM to store MAC address.
 * APC CMOS RAM is accessed through ISA bridge.
 * MAC address is read into @net_dev->dev_addr.
1621 */ 1622static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, 1623 struct net_device *dev) 1624{ 1625 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 }; 1626 struct sis190_private *tp = netdev_priv(dev); 1627 struct pci_dev *isa_bridge; 1628 u8 reg, tmp8; 1629 unsigned int i; 1630 1631 if (netif_msg_probe(tp)) 1632 pr_info("%s: Read MAC address from APC\n", pci_name(pdev)); 1633 1634 for (i = 0; i < ARRAY_SIZE(ids); i++) { 1635 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL); 1636 if (isa_bridge) 1637 break; 1638 } 1639 1640 if (!isa_bridge) { 1641 if (netif_msg_probe(tp)) 1642 pr_info("%s: Can not find ISA bridge\n", 1643 pci_name(pdev)); 1644 return -EIO; 1645 } 1646 1647 /* Enable port 78h & 79h to access APC Registers. */ 1648 pci_read_config_byte(isa_bridge, 0x48, &tmp8); 1649 reg = (tmp8 & ~0x02); 1650 pci_write_config_byte(isa_bridge, 0x48, reg); 1651 udelay(50); 1652 pci_read_config_byte(isa_bridge, 0x48, ®); 1653 1654 for (i = 0; i < MAC_ADDR_LEN; i++) { 1655 outb(0x9 + i, 0x78); 1656 dev->dev_addr[i] = inb(0x79); 1657 } 1658 1659 outb(0x12, 0x78); 1660 reg = inb(0x79); 1661 1662 sis190_set_rgmii(tp, reg); 1663 1664 /* Restore the value to ISA Bridge */ 1665 pci_write_config_byte(isa_bridge, 0x48, tmp8); 1666 pci_dev_put(isa_bridge); 1667 1668 return 0; 1669} 1670 1671/** 1672 * sis190_init_rxfilter - Initialize the Rx filter 1673 * @dev: network device to initialize 1674 * 1675 * Set receive filter address to our MAC address 1676 * and enable packet filtering. 1677 */ 1678static inline void sis190_init_rxfilter(struct net_device *dev) 1679{ 1680 struct sis190_private *tp = netdev_priv(dev); 1681 void __iomem *ioaddr = tp->mmio_addr; 1682 u16 ctl; 1683 int i; 1684 1685 ctl = SIS_R16(RxMacControl); 1686 /* 1687 * Disable packet filtering before setting filter. 1688 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits 1689 * only and followed by RxMacAddr (6 bytes). Strange. 
-- FR 1690 */ 1691 SIS_W16(RxMacControl, ctl & ~0x0f00); 1692 1693 for (i = 0; i < MAC_ADDR_LEN; i++) 1694 SIS_W8(RxMacAddr + i, dev->dev_addr[i]); 1695 1696 SIS_W16(RxMacControl, ctl); 1697 SIS_PCI_COMMIT(); 1698} 1699 1700static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, 1701 struct net_device *dev) 1702{ 1703 int rc; 1704 1705 rc = sis190_get_mac_addr_from_eeprom(pdev, dev); 1706 if (rc < 0) { 1707 u8 reg; 1708 1709 pci_read_config_byte(pdev, 0x73, ®); 1710 1711 if (reg & 0x00000001) 1712 rc = sis190_get_mac_addr_from_apc(pdev, dev); 1713 } 1714 return rc; 1715} 1716 1717static void sis190_set_speed_auto(struct net_device *dev) 1718{ 1719 struct sis190_private *tp = netdev_priv(dev); 1720 void __iomem *ioaddr = tp->mmio_addr; 1721 int phy_id = tp->mii_if.phy_id; 1722 int val; 1723 1724 netif_info(tp, link, dev, "Enabling Auto-negotiation\n"); 1725 1726 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE); 1727 1728 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0 1729 // unchanged. 1730 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) | 1731 ADVERTISE_100FULL | ADVERTISE_10FULL | 1732 ADVERTISE_100HALF | ADVERTISE_10HALF); 1733 1734 // Enable 1000 Full Mode. 1735 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL); 1736 1737 // Enable auto-negotiation and restart auto-negotiation. 
1738 mdio_write(ioaddr, phy_id, MII_BMCR, 1739 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); 1740} 1741 1742static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1743{ 1744 struct sis190_private *tp = netdev_priv(dev); 1745 1746 return mii_ethtool_gset(&tp->mii_if, cmd); 1747} 1748 1749static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1750{ 1751 struct sis190_private *tp = netdev_priv(dev); 1752 1753 return mii_ethtool_sset(&tp->mii_if, cmd); 1754} 1755 1756static void sis190_get_drvinfo(struct net_device *dev, 1757 struct ethtool_drvinfo *info) 1758{ 1759 struct sis190_private *tp = netdev_priv(dev); 1760 1761 strcpy(info->driver, DRV_NAME); 1762 strcpy(info->version, DRV_VERSION); 1763 strcpy(info->bus_info, pci_name(tp->pci_dev)); 1764} 1765 1766static int sis190_get_regs_len(struct net_device *dev) 1767{ 1768 return SIS190_REGS_SIZE; 1769} 1770 1771static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1772 void *p) 1773{ 1774 struct sis190_private *tp = netdev_priv(dev); 1775 unsigned long flags; 1776 1777 if (regs->len > SIS190_REGS_SIZE) 1778 regs->len = SIS190_REGS_SIZE; 1779 1780 spin_lock_irqsave(&tp->lock, flags); 1781 memcpy_fromio(p, tp->mmio_addr, regs->len); 1782 spin_unlock_irqrestore(&tp->lock, flags); 1783} 1784 1785static int sis190_nway_reset(struct net_device *dev) 1786{ 1787 struct sis190_private *tp = netdev_priv(dev); 1788 1789 return mii_nway_restart(&tp->mii_if); 1790} 1791 1792static u32 sis190_get_msglevel(struct net_device *dev) 1793{ 1794 struct sis190_private *tp = netdev_priv(dev); 1795 1796 return tp->msg_enable; 1797} 1798 1799static void sis190_set_msglevel(struct net_device *dev, u32 value) 1800{ 1801 struct sis190_private *tp = netdev_priv(dev); 1802 1803 tp->msg_enable = value; 1804} 1805 1806static const struct ethtool_ops sis190_ethtool_ops = { 1807 .get_settings = sis190_get_settings, 1808 .set_settings = sis190_set_settings, 1809 .get_drvinfo = 
		sis190_get_drvinfo,
	.get_regs_len = sis190_get_regs_len,
	.get_regs = sis190_get_regs,
	.get_link = ethtool_op_get_link,
	.get_msglevel = sis190_get_msglevel,
	.set_msglevel = sis190_set_msglevel,
	.nway_reset = sis190_nway_reset,
};

/* MII ioctls are only honoured while the interface is running. */
static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return !netif_running(dev) ? -EINVAL :
		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}

static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open = sis190_open,
	.ndo_stop = sis190_close,
	.ndo_do_ioctl = sis190_ioctl,
	.ndo_start_xmit = sis190_start_xmit,
	.ndo_tx_timeout = sis190_tx_timeout,
	.ndo_set_multicast_list = sis190_set_rx_mode,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = sis190_netpoll,
#endif
};

/*
 * PCI probe entry point: set up the board, read the MAC address,
 * probe the PHYs and register the net_device.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	/* Print the banner once, on first successful probe attempt. */
	if (!printed_version) {
		if (netif_msg_drv(&debug))
			pr_info(SIS190_DRIVER_NAME " loaded\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* Placeholder value: the device is MMIO-only. */
	dev->base_addr = (unsigned long) 0xdead;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	if (netif_msg_probe(tp)) {
		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
			    pci_name(pdev),
			    sis_chip_info[ent->driver_data].name,
			    ioaddr, dev->irq, dev->dev_addr);
		netdev_info(dev, "%s mode.\n",
			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
	}

	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}

/*
 * PCI remove entry point: flush the deferred PHY work before the
 * device goes away, then unregister and release everything.
 */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	flush_scheduled_work();
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver sis190_pci_driver = {
	.name = DRV_NAME,
	.id_table = sis190_pci_tbl,
	.probe = sis190_init_one,
	.remove = __devexit_p(sis190_remove_one),
};

static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}

static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);