/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include "jme.h"

/*
 * Module parameters controlling the pseudo hot-plug feature.
 * All default to -1, i.e. "not set by the user".
 */
static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
	"Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
	"Do not use external plug signal for pseudo hot-plug.");

/*
 * jme_mdio_read - read a PHY register through the SMI (MDIO) interface.
 *
 * Issues an SMI read request and busy-polls (up to JME_PHY_TIMEOUT * 50
 * iterations of 20us) until the controller clears SMI_OP_REQ.
 * MII_BMSR is issued twice ("again") — presumably because some BMSR bits
 * are latched and only current on the second read; TODO confirm against
 * the PHY datasheet.
 *
 * Returns the 16-bit register value, or 0 on timeout (note: a timeout is
 * indistinguishable from a register that genuinely reads 0).
 */
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

/*
 * jme_mdio_write - write a PHY register through the SMI (MDIO) interface.
 * Busy-polls for request completion like jme_mdio_read; logs (but does not
 * propagate) a timeout.
 */
static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
}

/*
 * jme_reset_phy_processor - re-advertise autonegotiation abilities and
 * reset the PHY.  1000Base-T abilities are only advertised on the JMC250
 * (the gigabit part).  The BMCR_RESET write preserves the other BMCR bits
 * read just before.
 */
static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);
}

/*
 * jme_setup_wakeup_frame - program one wakeup-frame slot (fnr) with a CRC
 * pattern and its dword mask array via the WFOI/WFODP indirect registers.
 * Each jwrite32 pair is ordered with wmb() because WFOI selects the slot
 * that the following WFODP write lands in.
 */
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i
			< WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}

/*
 * jme_reset_mac_processor - software-reset the MAC (GHC_SWRST pulse) and
 * clear the state the reset leaves stale: RX/TX ring base/count/next
 * registers, the multicast hash, and all wakeup-frame slots.  GPREG0 gets
 * link-interrupt polling enabled on FPGA (pre-silicon) versions only.
 */
static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;
	u32 gpreg0;
	int i;

	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if (jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
	jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
}

/*
 * jme_reset_ghc_speed - drop the speed/duplex bits from the cached GHC
 * value and write it back (used when the link goes away).
 */
static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
}

/*
 * jme_clear_pm - clear pending PME status bits (upper half of PMCS is
 * write-1-to-clear — TODO confirm against datasheet), restore D0 power
 * state and disable wake.
 */
static inline void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, false);
}

/*
 * jme_reload_eeprom - if an EEPROM is detected (SMBCSR_EEPROMD), trigger a
 * configuration reload and wait (up to JME_EEPROM_RELOAD_TIMEOUT ms after
 * an initial 12ms settle) for the RELOAD bit to self-clear.
 *
 * Returns 0 on success or when no EEPROM is present; -EIO on timeout.
 */
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			jeprintk(jme->pdev, "eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}

/*
 * jme_load_macaddr - read the station MAC address out of the unicast
 * filter registers (RXUMA_LO holds bytes 0-3, RXUMA_HI bytes 4-5,
 * little-endian within each register) into netdev->dev_addr, under the
 * macaddr lock.
 */
static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	u32 val;

	spin_lock_bh(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock_bh(&jme->macaddr_lock);
}

/*
 * jme_set_rx_pcc - program RX packet-completion-coalescing level p
 * (PCC_OFF or PCC_P1..P3): each level loads its timeout/packet-count
 * pair into PCCRX0.  Logged only when not in NAPI polling mode.
 */
static inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_OFF:
		jwrite32(jme, JME_PCCRX0,
			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}
	wmb();

	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}

/*
 * jme_start_irq - initialize dynamic-PCC state to level P1, program the
 * TX coalescing register, then unmask all interrupts.
 */
static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur		= PCC_P1;
	dpi->attempt		= PCC_P1;
	dpi->cnt		= 0;

	jwrite32(jme,
		JME_PCCTX,
		((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
		((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
		PCCTXQ0_EN
		);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

/*
 * jme_stop_irq - mask all interrupt sources (flushing write).
 */
static inline void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

/*
 * jme_linkstat_from_phy - build a PHY_LINK_* status word from PHY
 * registers (used on FPGA versions that lack the JME_PHY_LINK register).
 * Register 17 is a vendor-specific status register — TODO confirm its
 * layout matches PHY_LINK_* against the PHY datasheet.
 */
static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
	u32 phylink, bmsr;

	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	if (bmsr & BMSR_ANCOMP)
		phylink |= PHY_LINK_AUTONEG_COMPLETE;

	return phylink;
}

/*
 * jme_set_phyfifoa / jme_set_phyfifob - write vendor-specific PHY
 * register 27 FIFO settings (values per JMicron errata for buggy
 * JMC250 revisions; see is_buggy250 usage below).
 */
static inline void
jme_set_phyfifoa(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}

static inline void
jme_set_phyfifob(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}

/*
 * jme_check_link - sample link state and, when it changed, reprogram
 * MAC speed/duplex/clock-source (GHC), TX MAC settings, and the buggy-250
 * workarounds, then update carrier state.
 *
 * @testonly: when non-zero, only report; do not touch hardware state.
 *
 * Returns 1 if the link state is unchanged from jme->phylink, else 0.
 */
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	if (jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN
			 * Speed/Duplex Info should be obtained from SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			/* Decode forced speed from BMCR speed-select bits. */
			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					 PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		} else {
			/*
			 * Keep polling for speed/duplex resolve complete
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);

				if (jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}
			if (!cnt)
				jeprintk(jme->pdev,
					"Waiting speed resolve timeout.\n");

			strcat(linkmsg, "ANed: ");
		}

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		/*
		 * Rebuild GHC: clear all speed/duplex/clock-select bits,
		 * then set the ones matching the resolved link.
		 */
		ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc |= GHC_SPEED_10M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc |= GHC_SPEED_100M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc |= GHC_SPEED_1000M |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
			strcat(linkmsg, "1000 Mbps, ");
			break;
		default:
			break;
		}

		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
			ghc |= GHC_DPX;
		} else {
			/* Half duplex: enable collision handling + retries. */
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		/* Errata workarounds for buggy JMC250 revisions. */
		gpreg1 = GPREG1_DEFAULT;
		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
			if (!(phylink & PHY_LINK_DUPLEX))
				gpreg1 |= GPREG1_HALFMODEPATCH;
			switch (phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				jme_set_phyfifoa(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_100M:
				jme_set_phyfifob(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_1000M:
				jme_set_phyfifoa(jme);
				break;
			default:
				break;
			}
		}

		jwrite32(jme, JME_GPREG1, gpreg1);
		jwrite32(jme, JME_GHC, ghc);
		jme->reg_ghc = ghc;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");
		strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
					"MDI-X" :
					"MDI");
		netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		netif_info(jme, link, jme->dev, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

/*
 * jme_setup_tx_resources - allocate the TX descriptor ring (coherent DMA,
 * over-allocated so it can be aligned to RING_DESC_ALIGN) plus the
 * per-descriptor bufinf bookkeeping array, and zero both.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (all pointers are
 * reset to NULL/0 on the error paths).
 */
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				   &(txring->dmaalloc),
				   GFP_ATOMIC);

	if (!txring->alloc)
		goto err_set_null;

	/*
	 * 16 Bytes align
	 */
	txring->desc		= (void *)ALIGN((unsigned long)(txring->alloc),
						RING_DESC_ALIGN);
	txring->dma		= ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	txring->bufinf		= kmalloc(sizeof(struct jme_buffer_info) *
					jme->tx_ring_size, GFP_ATOMIC);
	if (unlikely(!(txring->bufinf)))
		goto err_free_txring;

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;

err_free_txring:
	dma_free_coherent(&(jme->pdev->dev),
			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
			  txring->alloc,
			  txring->dmaalloc);

err_set_null:
	txring->desc = NULL;
	txring->dmaalloc = 0;
	txring->dma = 0;
	txring->bufinf = NULL;

	return -ENOMEM;
}

/*
 * jme_free_tx_resources - release the TX ring: free any SKBs still held
 * in bufinf, the bufinf array itself, and the coherent descriptor block,
 * then reset the ring indices.
 */
static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi;

	if (txring->alloc) {
		if (txring->bufinf) {
			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
				txbi = txring->bufinf + i;
				if (txbi->skb) {
					dev_kfree_skb(txbi->skb);
					txbi->skb = NULL;
				}
				txbi->mapping		= 0;
				txbi->len		= 0;
				txbi->nr_desc		= 0;
				txbi->start_xmit	= 0;
			}
			kfree(txring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc		= NULL;
		txring->desc		= NULL;
		txring->dmaalloc	= 0;
		txring->dma		= 0;
		txring->bufinf		= NULL;
	}
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
}

/*
 * jme_enable_tx_engine - program queue 0's descriptor base/count and turn
 * the TX engine on.  Barriers order the register writes against the
 * queue-select and enable writes.
 */
static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Bass Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}

/*
 * jme_restart_tx_engine - re-enable the TX engine without reprogramming
 * the ring registers.
 */
static inline void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

/*
 * jme_disable_tx_engine - clear TXCS_ENABLE and poll (1ms steps, up to
 * JME_TX_DISABLE_TIMEOUT) for the engine to report stopped; logs on
 * timeout.
 */
static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable TX engine timeout.\n");
}

/*
 * jme_set_clean_rxdesc - rewrite RX descriptor i from its bufinf entry
 * (buffer address + length) and hand it back to hardware.  The wmb()
 * ensures the descriptor body is visible before OWN is set.
 */
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	register struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl	= cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
}

/*
 * jme_make_new_rx_buf - allocate and DMA-map a fresh receive SKB for
 * slot i.  NOTE(review): the pci_map_page() result is not checked with
 * pci_dma_mapping_error() — worth confirming against newer upstream.
 *
 * Returns 0 on success, -ENOMEM if SKB allocation fails.
 */
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_page(jme->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	return 0;
}

/*
 * jme_free_rx_buf - unmap and free the SKB (if any) held in RX slot i.
 */
static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		pci_unmap_page(jme->pdev,
				 rxbi->mapping,
				 rxbi->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

/*
 * jme_free_rx_resources - release all RX buffers, the bufinf array and
 * the coherent descriptor ring, then reset the ring indices.
 */
static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			kfree(rxring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc    = NULL;
		rxring->desc     = NULL;
		rxring->dmaalloc = 0;
		rxring->dma      = 0;
		rxring->bufinf   = NULL;
	}
	rxring->next_to_use   = 0;
	atomic_set(&rxring->next_to_clean, 0);
}

/*
 * jme_setup_rx_resources - allocate the aligned RX descriptor ring and
 * bufinf array, populate every slot with a mapped SKB and arm its
 * descriptor.  On any buffer failure the whole ring is torn down again.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				   &(rxring->dmaalloc),
				   GFP_ATOMIC);
	if (!rxring->alloc)
		goto err_set_null;

	/*
	 * 16 Bytes align
	 */
	rxring->desc		= (void *)ALIGN((unsigned long)(rxring->alloc),
						RING_DESC_ALIGN);
	rxring->dma		= ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use	= 0;
	atomic_set(&rxring->next_to_clean, 0);

	rxring->bufinf		= kmalloc(sizeof(struct jme_buffer_info) *
					jme->rx_ring_size, GFP_ATOMIC);
	if (unlikely(!(rxring->bufinf)))
		goto err_free_rxring;

	/*
	 * Initiallize Receive Descriptors
	 */
	memset(rxring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->rx_ring_size);
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;

err_free_rxring:
	dma_free_coherent(&(jme->pdev->dev),
			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
			  rxring->alloc,
			  rxring->dmaalloc);
err_set_null:
	rxring->desc = NULL;
	rxring->dmaalloc = 0;
	rxring->dma = 0;
	rxring->bufinf = NULL;

	return -ENOMEM;
}

/*
 * jme_enable_rx_engine - program queue 0's RX ring registers, reload the
 * unicast/multicast filters, and start the RX engine.
 */
static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Bass Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

/*
 * jme_restart_rx_engine - kick the RX engine again (e.g. after an
 * RX-empty condition) without reprogramming the ring.
 */
static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

/*
 * jme_disable_rx_engine - clear RXCS_ENABLE and poll (1ms steps, up to
 * JME_RX_DISABLE_TIMEOUT) until the engine reports stopped; logs on
 * timeout.
 */
static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable RX engine timeout.\n");

}

/*
 * jme_rxsum_ok - decide whether hardware checksum verification of a
 * received frame can be trusted, from the write-back flags.
 *
 * Returns false when no checksum was attempted at all, or when a TCP/UDP/
 * IPv4 checksum was expected but the corresponding "checksum good" bit is
 * missing (fragmented frames, flagged by RXWBFLAG_MF, are excluded from
 * the L4 checks).  Errors are only logged for IPv4 frames.
 */
static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
{
	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
		return false;

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
			== RXWBFLAG_TCPON)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
			== RXWBFLAG_UDPON)) {
		if (flags & RXWBFLAG_IPV4)
			netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
		return false;
	}

	if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
			== RXWBFLAG_IPV4)) {
		netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
		return false;
	}

	return true;
}

/*
 * jme_alloc_and_feed_skb - hand the SKB in RX slot idx up the stack and
 * refill the slot.  If the refill allocation fails the frame is dropped
 * (the old buffer is kept and re-armed) so the ring never runs dry.
 * VLAN-tagged frames go through jme_vlan_rx when a vlgrp is registered,
 * otherwise they are discarded.
 */
static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	} else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
			if (jme->vlgrp) {
				jme->jme_vlan_rx(skb, jme->vlgrp,
					le16_to_cpu(rxdesc->descwb.vlan));
				NET_STAT(jme).rx_bytes += 4;
			} else {
				dev_kfree_skb(skb);
			}
		} else {
			jme->jme_rx(skb);
		}

		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
		    cpu_to_le16(RXWBFLAG_DEST_MUL))
			++(NET_STAT(jme).multicast);

		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

}

/*
 * jme_process_receive - drain up to @limit completed RX descriptors.
 * Guarded by the rx_cleaning atomic (single cleaner at a time) and
 * skipped entirely while the link is changing or carrier is off.
 * Error frames (or multi-descriptor frames, desccnt > 1) are counted and
 * their descriptors recycled without delivery.
 *
 * Returns the unused budget (>= 0).
 */
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

	i = atomic_read(&rxring->next_to_clean);
	while (limit > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;
		--limit;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		if (unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if (desccnt > 1)
				limit -= desccnt - 1;

			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}

		} else {
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}

out:
	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	/* limit may have gone negative on a multi-descriptor error frame. */
	return limit > 0 ?
		limit : 0;

}

/*
 * jme_attempt_pcc - vote for coalescing level @atmp: reset the counter if
 * we are already at that level or the vote changed; otherwise count
 * consecutive identical votes (jme_dynamic_pcc switches after > 5).
 */
static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if (likely(atmp == dpi->cur)) {
		dpi->cnt = 0;
		return;
	}

	if (dpi->attempt == atmp) {
		++(dpi->cnt);
	} else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}

}

/*
 * jme_dynamic_pcc - pick an RX coalescing level from recent traffic:
 * P3 on high byte rate, P2 on high packet/interrupt rate, else P1.
 * A level change is applied only after >5 consecutive identical votes;
 * stepping down also schedules an RX clean to flush held packets.
 */
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
		 dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
		if (dpi->attempt < dpi->cur)
			tasklet_schedule(&jme->rxclean_task);
		jme_set_rx_pcc(jme, dpi->attempt);
		dpi->cur = dpi->attempt;
		dpi->cnt = 0;
	}
}

/*
 * jme_start_pcc_timer - snapshot the RX counters and arm the PCC interval
 * timer (the TMCSR counter counts up to 0xFFFFFF, hence the subtraction).
 */
static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes		= NET_STAT(jme).rx_bytes;
	dpi->last_pkts		= NET_STAT(jme).rx_packets;
	dpi->intr_cnt		= 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}

/*
 * jme_shutdown_nic - pseudo hot-plug path: when the link is down, mask
 * interrupts and arm TIMER2 to trigger the delayed shutdown.
 */
static void
jme_shutdown_nic(struct jme_adapter *jme)
{
	u32 phylink;

	phylink = jme_linkstat_from_phy(jme);

	if (!(phylink & PHY_LINK_UP)) {
		/*
		 * Disable all interrupt before issue timer
		 */
		jme_stop_irq(jme);
		jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
	}
}

/*
 * jme_pcc_tasklet - periodic timer handler: runs the shutdown path when
 * pending, stops itself while the link is down/changing, otherwise
 * re-evaluates dynamic PCC (unless NAPI polling owns coalescing) and
 * re-arms the timer.
 */
static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;

	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
		jme_shutdown_nic(jme);
		return;
	}

	if (unlikely(!netif_carrier_ok(netdev) ||
		(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		jme_dynamic_pcc(jme);

	jme_start_pcc_timer(jme);
}

/* Coalescing off while NAPI polls; back to P1 for interrupt mode. */
static inline void
jme_polling_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_OFF);
}

static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_P1);
}

static inline int
jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
{
	u32 apmc;
	apmc = jread32(jme, JME_APMC);
	return apmc & JME_APMC_PSEUDO_HP_EN;
}

/*
 * jme_start_shutdown_timer - enable PCIe shutdown in APMC (optionally via
 * the external plug signal) and arm the delayed-shutdown timer.
 */
static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
	apmc &= ~JME_APMC_EPIEN_CTRL;
	if (!no_extplug) {
		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
		wmb();
	}
	jwrite32f(jme, JME_APMC, apmc);

	jwrite32f(jme, JME_TIMER2, 0);
	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}

/*
 * jme_stop_shutdown_timer - undo jme_start_shutdown_timer: stop both
 * timers, clear the flag and restore APMC.
 */
static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	jwrite32f(jme, JME_TMCSR, 0);
	jwrite32f(jme, JME_TIMER2, 0);
	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

	apmc = jread32(jme, JME_APMC);
	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
	wmb();
	jwrite32f(jme, JME_APMC, apmc);
}

/*
 * jme_link_change_tasklet - full reconfiguration on link (or MTU) change:
 * acquires the link_changing "lock" (spin-waits with logging if busy),
 * quiesces the datapath tasklets, tears down and rebuilds both rings and
 * engines according to the new link state, then restarts everything.
 * On allocation failure the device is left stopped (logged).
 */
static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int rc;

	while (!atomic_dec_and_test(&jme->link_changing)) {
		atomic_inc(&jme->link_changing);
		netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
		while (atomic_read(&jme->link_changing) != 1)
			netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
	}

	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);
	if (jme_pseudo_hotplug_enabled(jme))
		jme_stop_shutdown_timer(jme);

	jme_stop_pcc_timer(jme);
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		netif_carrier_off(netdev);
	}

	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev, "Allocating resources for RX error"
				", Device STOPPED!\n");
			goto out_enable_tasklet;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev, "Allocating resources for TX error"
				", Device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_interrupt_mode(jme);

		jme_start_pcc_timer(jme);
	} else if (jme_pseudo_hotplug_enabled(jme)) {
		jme_start_shutdown_timer(jme);
	}

	goto out_enable_tasklet;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out_enable_tasklet:
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);
out:
	atomic_inc(&jme->link_changing);
}

/*
 * jme_rx_clean_tasklet - drain the RX ring (full budget) and count the
 * interrupt for dynamic-PCC accounting.
 */
static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);

}

/*
 * jme_poll - NAPI poll: process up to @budget frames, restart the RX
 * engine for every pending rx_empty event, and when budget remains,
 * complete NAPI and return to interrupt (P1) coalescing.
 *
 * Returns the number of frames processed.
 */
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	if (rest) {
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

/*
 * jme_rx_empty_tasklet - non-NAPI handler for the RX-ring-empty
 * interrupt: clean the ring, then restart the RX engine once per
 * pending empty event (each counted as a drop).
 */
static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
}

/*
 * jme_wake_queue_if_stopped - restart the TX queue once enough
 * descriptors are free again (smp_wmb pairs with the stop path —
 * presumably in the xmit routine, outside this chunk).
 */
static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	smp_wmb();
	if (unlikely(netif_queue_stopped(jme->dev) &&
	atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
		netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
		netif_wake_queue(jme->dev);
	}

}

/*
 * jme_tx_clean_tasklet - reclaim completed TX descriptors: for each
 * finished SKB, unmap its fragment pages (descriptors i+1..i+nr_desc-1;
 * the head descriptor's mapping is not unmapped here — its bookkeeping
 * entry holds skb/len only), free the SKB, update stats, then advance
 * next_to_clean and release the freed slots.  Serialized by the
 * tx_cleaning atomic and skipped while the link is changing/down.
 */
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	tx_dbg(jme, "Into txclean.\n");

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		if (likely(ctxbi->skb &&
		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			tx_dbg(jme, "txclean: %d+%d@%lu\n",
					i, ctxbi->nr_desc, jiffies);

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						 ttxbi->mapping,
						 ttxbi->len,
						 PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if (unlikely(err)) {
				++(NET_STAT(jme).tx_carrier_errors);
			} else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;

		} else {
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}

/*
 * jme_intr_msi - common interrupt dispatch (shared by INTx and MSI
 * paths): masks interrupts, acknowledges each event class and schedules
 * the matching tasklet/NAPI work, then unmasks.  A link-change event
 * preempts everything else.
 */
static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		/*
		 * Link change event is critical
		 * all other events are ignored
		 */
		jwrite32(jme, JME_IEVE, intrstat);
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR) {
		jwrite32(jme, JME_IEVE, INTR_TMINTR);
		tasklet_schedule(&jme->pcc_task);
	}

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely((intrstat & INTR_ENABLE) == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exist
1493 */ 1494 if (unlikely(intrstat == ~((typeof(intrstat))0))) 1495 return IRQ_NONE; 1496 1497 jme_intr_msi(jme, intrstat); 1498 1499 return IRQ_HANDLED; 1500} 1501 1502static irqreturn_t 1503jme_msi(int irq, void *dev_id) 1504{ 1505 struct net_device *netdev = dev_id; 1506 struct jme_adapter *jme = netdev_priv(netdev); 1507 u32 intrstat; 1508 1509 intrstat = jread32(jme, JME_IEVE); 1510 1511 jme_intr_msi(jme, intrstat); 1512 1513 return IRQ_HANDLED; 1514} 1515 1516static void 1517jme_reset_link(struct jme_adapter *jme) 1518{ 1519 jwrite32(jme, JME_TMCSR, TMCSR_SWIT); 1520} 1521 1522static void 1523jme_restart_an(struct jme_adapter *jme) 1524{ 1525 u32 bmcr; 1526 1527 spin_lock_bh(&jme->phy_lock); 1528 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1529 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 1530 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); 1531 spin_unlock_bh(&jme->phy_lock); 1532} 1533 1534static int 1535jme_request_irq(struct jme_adapter *jme) 1536{ 1537 int rc; 1538 struct net_device *netdev = jme->dev; 1539 irq_handler_t handler = jme_intr; 1540 int irq_flags = IRQF_SHARED; 1541 1542 if (!pci_enable_msi(jme->pdev)) { 1543 set_bit(JME_FLAG_MSI, &jme->flags); 1544 handler = jme_msi; 1545 irq_flags = 0; 1546 } 1547 1548 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, 1549 netdev); 1550 if (rc) { 1551 jeprintk(jme->pdev, 1552 "Unable to request %s interrupt (return: %d)\n", 1553 test_bit(JME_FLAG_MSI, &jme->flags) ? 
"MSI" : "INTx", 1554 rc); 1555 1556 if (test_bit(JME_FLAG_MSI, &jme->flags)) { 1557 pci_disable_msi(jme->pdev); 1558 clear_bit(JME_FLAG_MSI, &jme->flags); 1559 } 1560 } else { 1561 netdev->irq = jme->pdev->irq; 1562 } 1563 1564 return rc; 1565} 1566 1567static void 1568jme_free_irq(struct jme_adapter *jme) 1569{ 1570 free_irq(jme->pdev->irq, jme->dev); 1571 if (test_bit(JME_FLAG_MSI, &jme->flags)) { 1572 pci_disable_msi(jme->pdev); 1573 clear_bit(JME_FLAG_MSI, &jme->flags); 1574 jme->dev->irq = jme->pdev->irq; 1575 } 1576} 1577 1578static inline void 1579jme_phy_on(struct jme_adapter *jme) 1580{ 1581 u32 bmcr; 1582 1583 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1584 bmcr &= ~BMCR_PDOWN; 1585 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); 1586} 1587 1588static int 1589jme_open(struct net_device *netdev) 1590{ 1591 struct jme_adapter *jme = netdev_priv(netdev); 1592 int rc; 1593 1594 jme_clear_pm(jme); 1595 JME_NAPI_ENABLE(jme); 1596 1597 tasklet_enable(&jme->linkch_task); 1598 tasklet_enable(&jme->txclean_task); 1599 tasklet_hi_enable(&jme->rxclean_task); 1600 tasklet_hi_enable(&jme->rxempty_task); 1601 1602 rc = jme_request_irq(jme); 1603 if (rc) 1604 goto err_out; 1605 1606 jme_start_irq(jme); 1607 1608 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 1609 jme_phy_on(jme); 1610 jme_set_settings(netdev, &jme->old_ecmd); 1611 } else { 1612 jme_reset_phy_processor(jme); 1613 } 1614 1615 jme_reset_link(jme); 1616 1617 return 0; 1618 1619err_out: 1620 netif_stop_queue(netdev); 1621 netif_carrier_off(netdev); 1622 return rc; 1623} 1624 1625#ifdef CONFIG_PM 1626static void 1627jme_set_100m_half(struct jme_adapter *jme) 1628{ 1629 u32 bmcr, tmp; 1630 1631 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1632 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | 1633 BMCR_SPEED1000 | BMCR_FULLDPLX); 1634 tmp |= BMCR_SPEED100; 1635 1636 if (bmcr != tmp) 1637 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); 1638 1639 if 
(jme->fpgaver) 1640 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); 1641 else 1642 jwrite32(jme, JME_GHC, GHC_SPEED_100M); 1643} 1644 1645#define JME_WAIT_LINK_TIME 2000 /* 2000ms */ 1646static void 1647jme_wait_link(struct jme_adapter *jme) 1648{ 1649 u32 phylink, to = JME_WAIT_LINK_TIME; 1650 1651 mdelay(1000); 1652 phylink = jme_linkstat_from_phy(jme); 1653 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { 1654 mdelay(10); 1655 phylink = jme_linkstat_from_phy(jme); 1656 } 1657} 1658#endif 1659 1660static inline void 1661jme_phy_off(struct jme_adapter *jme) 1662{ 1663 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN); 1664} 1665 1666static int 1667jme_close(struct net_device *netdev) 1668{ 1669 struct jme_adapter *jme = netdev_priv(netdev); 1670 1671 netif_stop_queue(netdev); 1672 netif_carrier_off(netdev); 1673 1674 jme_stop_irq(jme); 1675 jme_free_irq(jme); 1676 1677 JME_NAPI_DISABLE(jme); 1678 1679 tasklet_disable(&jme->linkch_task); 1680 tasklet_disable(&jme->txclean_task); 1681 tasklet_disable(&jme->rxclean_task); 1682 tasklet_disable(&jme->rxempty_task); 1683 1684 jme_reset_ghc_speed(jme); 1685 jme_disable_rx_engine(jme); 1686 jme_disable_tx_engine(jme); 1687 jme_reset_mac_processor(jme); 1688 jme_free_rx_resources(jme); 1689 jme_free_tx_resources(jme); 1690 jme->phylink = 0; 1691 jme_phy_off(jme); 1692 1693 return 0; 1694} 1695 1696static int 1697jme_alloc_txdesc(struct jme_adapter *jme, 1698 struct sk_buff *skb) 1699{ 1700 struct jme_ring *txring = &(jme->txring[0]); 1701 int idx, nr_alloc, mask = jme->tx_ring_mask; 1702 1703 idx = txring->next_to_use; 1704 nr_alloc = skb_shinfo(skb)->nr_frags + 2; 1705 1706 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) 1707 return -1; 1708 1709 atomic_sub(nr_alloc, &txring->nr_free); 1710 1711 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; 1712 1713 return idx; 1714} 1715 1716static void 1717jme_fill_tx_map(struct pci_dev *pdev, 1718 struct txdesc *txdesc, 1719 struct 
jme_buffer_info *txbi, 1720 struct page *page, 1721 u32 page_offset, 1722 u32 len, 1723 u8 hidma) 1724{ 1725 dma_addr_t dmaaddr; 1726 1727 dmaaddr = pci_map_page(pdev, 1728 page, 1729 page_offset, 1730 len, 1731 PCI_DMA_TODEVICE); 1732 1733 pci_dma_sync_single_for_device(pdev, 1734 dmaaddr, 1735 len, 1736 PCI_DMA_TODEVICE); 1737 1738 txdesc->dw[0] = 0; 1739 txdesc->dw[1] = 0; 1740 txdesc->desc2.flags = TXFLAG_OWN; 1741 txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0; 1742 txdesc->desc2.datalen = cpu_to_le16(len); 1743 txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); 1744 txdesc->desc2.bufaddrl = cpu_to_le32( 1745 (__u64)dmaaddr & 0xFFFFFFFFUL); 1746 1747 txbi->mapping = dmaaddr; 1748 txbi->len = len; 1749} 1750 1751static void 1752jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) 1753{ 1754 struct jme_ring *txring = &(jme->txring[0]); 1755 struct txdesc *txdesc = txring->desc, *ctxdesc; 1756 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; 1757 u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; 1758 int i, nr_frags = skb_shinfo(skb)->nr_frags; 1759 int mask = jme->tx_ring_mask; 1760 struct skb_frag_struct *frag; 1761 u32 len; 1762 1763 for (i = 0 ; i < nr_frags ; ++i) { 1764 frag = &skb_shinfo(skb)->frags[i]; 1765 ctxdesc = txdesc + ((idx + i + 2) & (mask)); 1766 ctxbi = txbi + ((idx + i + 2) & (mask)); 1767 1768 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page, 1769 frag->page_offset, frag->size, hidma); 1770 } 1771 1772 len = skb_is_nonlinear(skb) ? 
skb_headlen(skb) : skb->len; 1773 ctxdesc = txdesc + ((idx + 1) & (mask)); 1774 ctxbi = txbi + ((idx + 1) & (mask)); 1775 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), 1776 offset_in_page(skb->data), len, hidma); 1777 1778} 1779 1780static int 1781jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb) 1782{ 1783 if (unlikely(skb_shinfo(skb)->gso_size && 1784 skb_header_cloned(skb) && 1785 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { 1786 dev_kfree_skb(skb); 1787 return -1; 1788 } 1789 1790 return 0; 1791} 1792 1793static int 1794jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 1795{ 1796 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); 1797 if (*mss) { 1798 *flags |= TXFLAG_LSEN; 1799 1800 if (skb->protocol == htons(ETH_P_IP)) { 1801 struct iphdr *iph = ip_hdr(skb); 1802 1803 iph->check = 0; 1804 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 1805 iph->daddr, 0, 1806 IPPROTO_TCP, 1807 0); 1808 } else { 1809 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1810 1811 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr, 1812 &ip6h->daddr, 0, 1813 IPPROTO_TCP, 1814 0); 1815 } 1816 1817 return 0; 1818 } 1819 1820 return 1; 1821} 1822 1823static void 1824jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) 1825{ 1826 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1827 u8 ip_proto; 1828 1829 switch (skb->protocol) { 1830 case htons(ETH_P_IP): 1831 ip_proto = ip_hdr(skb)->protocol; 1832 break; 1833 case htons(ETH_P_IPV6): 1834 ip_proto = ipv6_hdr(skb)->nexthdr; 1835 break; 1836 default: 1837 ip_proto = 0; 1838 break; 1839 } 1840 1841 switch (ip_proto) { 1842 case IPPROTO_TCP: 1843 *flags |= TXFLAG_TCPCS; 1844 break; 1845 case IPPROTO_UDP: 1846 *flags |= TXFLAG_UDPCS; 1847 break; 1848 default: 1849 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n"); 1850 break; 1851 } 1852 } 1853} 1854 1855static inline void 1856jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) 1857{ 1858 if 
(vlan_tx_tag_present(skb)) { 1859 *flags |= TXFLAG_TAGON; 1860 *vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 1861 } 1862} 1863 1864static int 1865jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) 1866{ 1867 struct jme_ring *txring = &(jme->txring[0]); 1868 struct txdesc *txdesc; 1869 struct jme_buffer_info *txbi; 1870 u8 flags; 1871 1872 txdesc = (struct txdesc *)txring->desc + idx; 1873 txbi = txring->bufinf + idx; 1874 1875 txdesc->dw[0] = 0; 1876 txdesc->dw[1] = 0; 1877 txdesc->dw[2] = 0; 1878 txdesc->dw[3] = 0; 1879 txdesc->desc1.pktsize = cpu_to_le16(skb->len); 1880 /* 1881 * Set OWN bit at final. 1882 * When kernel transmit faster than NIC. 1883 * And NIC trying to send this descriptor before we tell 1884 * it to start sending this TX queue. 1885 * Other fields are already filled correctly. 1886 */ 1887 wmb(); 1888 flags = TXFLAG_OWN | TXFLAG_INT; 1889 /* 1890 * Set checksum flags while not tso 1891 */ 1892 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) 1893 jme_tx_csum(jme, skb, &flags); 1894 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); 1895 jme_map_tx_skb(jme, skb, idx); 1896 txdesc->desc1.flags = flags; 1897 /* 1898 * Set tx buffer info after telling NIC to send 1899 * For better tx_clean timing 1900 */ 1901 wmb(); 1902 txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; 1903 txbi->skb = skb; 1904 txbi->len = skb->len; 1905 txbi->start_xmit = jiffies; 1906 if (!txbi->start_xmit) 1907 txbi->start_xmit = (0UL-1); 1908 1909 return 0; 1910} 1911 1912static void 1913jme_stop_queue_if_full(struct jme_adapter *jme) 1914{ 1915 struct jme_ring *txring = &(jme->txring[0]); 1916 struct jme_buffer_info *txbi = txring->bufinf; 1917 int idx = atomic_read(&txring->next_to_clean); 1918 1919 txbi += idx; 1920 1921 smp_wmb(); 1922 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { 1923 netif_stop_queue(jme->dev); 1924 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n"); 1925 smp_wmb(); 1926 if (atomic_read(&txring->nr_free) 1927 >= 
(jme->tx_wake_threshold)) { 1928 netif_wake_queue(jme->dev); 1929 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n"); 1930 } 1931 } 1932 1933 if (unlikely(txbi->start_xmit && 1934 (jiffies - txbi->start_xmit) >= TX_TIMEOUT && 1935 txbi->skb)) { 1936 netif_stop_queue(jme->dev); 1937 netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies); 1938 } 1939} 1940 1941/* 1942 * This function is already protected by netif_tx_lock() 1943 */ 1944 1945static netdev_tx_t 1946jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) 1947{ 1948 struct jme_adapter *jme = netdev_priv(netdev); 1949 int idx; 1950 1951 if (unlikely(jme_expand_header(jme, skb))) { 1952 ++(NET_STAT(jme).tx_dropped); 1953 return NETDEV_TX_OK; 1954 } 1955 1956 idx = jme_alloc_txdesc(jme, skb); 1957 1958 if (unlikely(idx < 0)) { 1959 netif_stop_queue(netdev); 1960 netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n"); 1961 1962 return NETDEV_TX_BUSY; 1963 } 1964 1965 jme_fill_tx_desc(jme, skb, idx); 1966 1967 jwrite32(jme, JME_TXCS, jme->reg_txcs | 1968 TXCS_SELECT_QUEUE0 | 1969 TXCS_QUEUE0S | 1970 TXCS_ENABLE); 1971 1972 tx_dbg(jme, "xmit: %d+%d@%lu\n", idx, 1973 skb_shinfo(skb)->nr_frags + 2, 1974 jiffies); 1975 jme_stop_queue_if_full(jme); 1976 1977 return NETDEV_TX_OK; 1978} 1979 1980static int 1981jme_set_macaddr(struct net_device *netdev, void *p) 1982{ 1983 struct jme_adapter *jme = netdev_priv(netdev); 1984 struct sockaddr *addr = p; 1985 u32 val; 1986 1987 if (netif_running(netdev)) 1988 return -EBUSY; 1989 1990 spin_lock_bh(&jme->macaddr_lock); 1991 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1992 1993 val = (addr->sa_data[3] & 0xff) << 24 | 1994 (addr->sa_data[2] & 0xff) << 16 | 1995 (addr->sa_data[1] & 0xff) << 8 | 1996 (addr->sa_data[0] & 0xff); 1997 jwrite32(jme, JME_RXUMA_LO, val); 1998 val = (addr->sa_data[5] & 0xff) << 8 | 1999 (addr->sa_data[4] & 0xff); 2000 jwrite32(jme, JME_RXUMA_HI, val); 2001 
spin_unlock_bh(&jme->macaddr_lock); 2002 2003 return 0; 2004} 2005 2006static void 2007jme_set_multi(struct net_device *netdev) 2008{ 2009 struct jme_adapter *jme = netdev_priv(netdev); 2010 u32 mc_hash[2] = {}; 2011 2012 spin_lock_bh(&jme->rxmcs_lock); 2013 2014 jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; 2015 2016 if (netdev->flags & IFF_PROMISC) { 2017 jme->reg_rxmcs |= RXMCS_ALLFRAME; 2018 } else if (netdev->flags & IFF_ALLMULTI) { 2019 jme->reg_rxmcs |= RXMCS_ALLMULFRAME; 2020 } else if (netdev->flags & IFF_MULTICAST) { 2021 struct netdev_hw_addr *ha; 2022 int bit_nr; 2023 2024 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; 2025 netdev_for_each_mc_addr(ha, netdev) { 2026 bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; 2027 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); 2028 } 2029 2030 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); 2031 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); 2032 } 2033 2034 wmb(); 2035 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); 2036 2037 spin_unlock_bh(&jme->rxmcs_lock); 2038} 2039 2040static int 2041jme_change_mtu(struct net_device *netdev, int new_mtu) 2042{ 2043 struct jme_adapter *jme = netdev_priv(netdev); 2044 2045 if (new_mtu == jme->old_mtu) 2046 return 0; 2047 2048 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || 2049 ((new_mtu) < IPV6_MIN_MTU)) 2050 return -EINVAL; 2051 2052 if (new_mtu > 4000) { 2053 jme->reg_rxcs &= ~RXCS_FIFOTHNP; 2054 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; 2055 jme_restart_rx_engine(jme); 2056 } else { 2057 jme->reg_rxcs &= ~RXCS_FIFOTHNP; 2058 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; 2059 jme_restart_rx_engine(jme); 2060 } 2061 2062 if (new_mtu > 1900) { 2063 netdev->features &= ~(NETIF_F_HW_CSUM | 2064 NETIF_F_TSO | 2065 NETIF_F_TSO6); 2066 } else { 2067 if (test_bit(JME_FLAG_TXCSUM, &jme->flags)) 2068 netdev->features |= NETIF_F_HW_CSUM; 2069 if (test_bit(JME_FLAG_TSO, &jme->flags)) 2070 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; 2071 } 2072 2073 netdev->mtu = new_mtu; 2074 
jme_reset_link(jme); 2075 2076 return 0; 2077} 2078 2079static void 2080jme_tx_timeout(struct net_device *netdev) 2081{ 2082 struct jme_adapter *jme = netdev_priv(netdev); 2083 2084 jme->phylink = 0; 2085 jme_reset_phy_processor(jme); 2086 if (test_bit(JME_FLAG_SSET, &jme->flags)) 2087 jme_set_settings(netdev, &jme->old_ecmd); 2088 2089 /* 2090 * Force to Reset the link again 2091 */ 2092 jme_reset_link(jme); 2093} 2094 2095static inline void jme_pause_rx(struct jme_adapter *jme) 2096{ 2097 atomic_dec(&jme->link_changing); 2098 2099 jme_set_rx_pcc(jme, PCC_OFF); 2100 if (test_bit(JME_FLAG_POLL, &jme->flags)) { 2101 JME_NAPI_DISABLE(jme); 2102 } else { 2103 tasklet_disable(&jme->rxclean_task); 2104 tasklet_disable(&jme->rxempty_task); 2105 } 2106} 2107 2108static inline void jme_resume_rx(struct jme_adapter *jme) 2109{ 2110 struct dynpcc_info *dpi = &(jme->dpi); 2111 2112 if (test_bit(JME_FLAG_POLL, &jme->flags)) { 2113 JME_NAPI_ENABLE(jme); 2114 } else { 2115 tasklet_hi_enable(&jme->rxclean_task); 2116 tasklet_hi_enable(&jme->rxempty_task); 2117 } 2118 dpi->cur = PCC_P1; 2119 dpi->attempt = PCC_P1; 2120 dpi->cnt = 0; 2121 jme_set_rx_pcc(jme, PCC_P1); 2122 2123 atomic_inc(&jme->link_changing); 2124} 2125 2126static void 2127jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2128{ 2129 struct jme_adapter *jme = netdev_priv(netdev); 2130 2131 jme_pause_rx(jme); 2132 jme->vlgrp = grp; 2133 jme_resume_rx(jme); 2134} 2135 2136static void 2137jme_get_drvinfo(struct net_device *netdev, 2138 struct ethtool_drvinfo *info) 2139{ 2140 struct jme_adapter *jme = netdev_priv(netdev); 2141 2142 strcpy(info->driver, DRV_NAME); 2143 strcpy(info->version, DRV_VERSION); 2144 strcpy(info->bus_info, pci_name(jme->pdev)); 2145} 2146 2147static int 2148jme_get_regs_len(struct net_device *netdev) 2149{ 2150 return JME_REG_LEN; 2151} 2152 2153static void 2154mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) 2155{ 2156 int i; 2157 2158 for (i = 0 ; i < len 
; i += 4) 2159 p[i >> 2] = jread32(jme, reg + i); 2160} 2161 2162static void 2163mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) 2164{ 2165 int i; 2166 u16 *p16 = (u16 *)p; 2167 2168 for (i = 0 ; i < reg_nr ; ++i) 2169 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); 2170} 2171 2172static void 2173jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) 2174{ 2175 struct jme_adapter *jme = netdev_priv(netdev); 2176 u32 *p32 = (u32 *)p; 2177 2178 memset(p, 0xFF, JME_REG_LEN); 2179 2180 regs->version = 1; 2181 mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); 2182 2183 p32 += 0x100 >> 2; 2184 mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); 2185 2186 p32 += 0x100 >> 2; 2187 mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); 2188 2189 p32 += 0x100 >> 2; 2190 mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); 2191 2192 p32 += 0x100 >> 2; 2193 mdio_memcpy(jme, p32, JME_PHY_REG_NR); 2194} 2195 2196static int 2197jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) 2198{ 2199 struct jme_adapter *jme = netdev_priv(netdev); 2200 2201 ecmd->tx_coalesce_usecs = PCC_TX_TO; 2202 ecmd->tx_max_coalesced_frames = PCC_TX_CNT; 2203 2204 if (test_bit(JME_FLAG_POLL, &jme->flags)) { 2205 ecmd->use_adaptive_rx_coalesce = false; 2206 ecmd->rx_coalesce_usecs = 0; 2207 ecmd->rx_max_coalesced_frames = 0; 2208 return 0; 2209 } 2210 2211 ecmd->use_adaptive_rx_coalesce = true; 2212 2213 switch (jme->dpi.cur) { 2214 case PCC_P1: 2215 ecmd->rx_coalesce_usecs = PCC_P1_TO; 2216 ecmd->rx_max_coalesced_frames = PCC_P1_CNT; 2217 break; 2218 case PCC_P2: 2219 ecmd->rx_coalesce_usecs = PCC_P2_TO; 2220 ecmd->rx_max_coalesced_frames = PCC_P2_CNT; 2221 break; 2222 case PCC_P3: 2223 ecmd->rx_coalesce_usecs = PCC_P3_TO; 2224 ecmd->rx_max_coalesced_frames = PCC_P3_CNT; 2225 break; 2226 default: 2227 break; 2228 } 2229 2230 return 0; 2231} 2232 2233static int 2234jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) 2235{ 2236 struct 
jme_adapter *jme = netdev_priv(netdev); 2237 struct dynpcc_info *dpi = &(jme->dpi); 2238 2239 if (netif_running(netdev)) 2240 return -EBUSY; 2241 2242 if (ecmd->use_adaptive_rx_coalesce && 2243 test_bit(JME_FLAG_POLL, &jme->flags)) { 2244 clear_bit(JME_FLAG_POLL, &jme->flags); 2245 jme->jme_rx = netif_rx; 2246 jme->jme_vlan_rx = vlan_hwaccel_rx; 2247 dpi->cur = PCC_P1; 2248 dpi->attempt = PCC_P1; 2249 dpi->cnt = 0; 2250 jme_set_rx_pcc(jme, PCC_P1); 2251 jme_interrupt_mode(jme); 2252 } else if (!(ecmd->use_adaptive_rx_coalesce) && 2253 !(test_bit(JME_FLAG_POLL, &jme->flags))) { 2254 set_bit(JME_FLAG_POLL, &jme->flags); 2255 jme->jme_rx = netif_receive_skb; 2256 jme->jme_vlan_rx = vlan_hwaccel_receive_skb; 2257 jme_interrupt_mode(jme); 2258 } 2259 2260 return 0; 2261} 2262 2263static void 2264jme_get_pauseparam(struct net_device *netdev, 2265 struct ethtool_pauseparam *ecmd) 2266{ 2267 struct jme_adapter *jme = netdev_priv(netdev); 2268 u32 val; 2269 2270 ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; 2271 ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; 2272 2273 spin_lock_bh(&jme->phy_lock); 2274 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); 2275 spin_unlock_bh(&jme->phy_lock); 2276 2277 ecmd->autoneg = 2278 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; 2279} 2280 2281static int 2282jme_set_pauseparam(struct net_device *netdev, 2283 struct ethtool_pauseparam *ecmd) 2284{ 2285 struct jme_adapter *jme = netdev_priv(netdev); 2286 u32 val; 2287 2288 if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ 2289 (ecmd->tx_pause != 0)) { 2290 2291 if (ecmd->tx_pause) 2292 jme->reg_txpfc |= TXPFC_PF_EN; 2293 else 2294 jme->reg_txpfc &= ~TXPFC_PF_EN; 2295 2296 jwrite32(jme, JME_TXPFC, jme->reg_txpfc); 2297 } 2298 2299 spin_lock_bh(&jme->rxmcs_lock); 2300 if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ 2301 (ecmd->rx_pause != 0)) { 2302 2303 if (ecmd->rx_pause) 2304 jme->reg_rxmcs |= RXMCS_FLOWCTRL; 2305 else 2306 jme->reg_rxmcs &= 
~RXMCS_FLOWCTRL; 2307 2308 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); 2309 } 2310 spin_unlock_bh(&jme->rxmcs_lock); 2311 2312 spin_lock_bh(&jme->phy_lock); 2313 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); 2314 if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ 2315 (ecmd->autoneg != 0)) { 2316 2317 if (ecmd->autoneg) 2318 val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 2319 else 2320 val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 2321 2322 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 2323 MII_ADVERTISE, val); 2324 } 2325 spin_unlock_bh(&jme->phy_lock); 2326 2327 return 0; 2328} 2329 2330static void 2331jme_get_wol(struct net_device *netdev, 2332 struct ethtool_wolinfo *wol) 2333{ 2334 struct jme_adapter *jme = netdev_priv(netdev); 2335 2336 wol->supported = WAKE_MAGIC | WAKE_PHY; 2337 2338 wol->wolopts = 0; 2339 2340 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) 2341 wol->wolopts |= WAKE_PHY; 2342 2343 if (jme->reg_pmcs & PMCS_MFEN) 2344 wol->wolopts |= WAKE_MAGIC; 2345 2346} 2347 2348static int 2349jme_set_wol(struct net_device *netdev, 2350 struct ethtool_wolinfo *wol) 2351{ 2352 struct jme_adapter *jme = netdev_priv(netdev); 2353 2354 if (wol->wolopts & (WAKE_MAGICSECURE | 2355 WAKE_UCAST | 2356 WAKE_MCAST | 2357 WAKE_BCAST | 2358 WAKE_ARP)) 2359 return -EOPNOTSUPP; 2360 2361 jme->reg_pmcs = 0; 2362 2363 if (wol->wolopts & WAKE_PHY) 2364 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; 2365 2366 if (wol->wolopts & WAKE_MAGIC) 2367 jme->reg_pmcs |= PMCS_MFEN; 2368 2369 jwrite32(jme, JME_PMCS, jme->reg_pmcs); 2370 2371 return 0; 2372} 2373 2374static int 2375jme_get_settings(struct net_device *netdev, 2376 struct ethtool_cmd *ecmd) 2377{ 2378 struct jme_adapter *jme = netdev_priv(netdev); 2379 int rc; 2380 2381 spin_lock_bh(&jme->phy_lock); 2382 rc = mii_ethtool_gset(&(jme->mii_if), ecmd); 2383 spin_unlock_bh(&jme->phy_lock); 2384 return rc; 2385} 2386 2387static int 2388jme_set_settings(struct net_device *netdev, 2389 struct 
ethtool_cmd *ecmd) 2390{ 2391 struct jme_adapter *jme = netdev_priv(netdev); 2392 int rc, fdc = 0; 2393 2394 if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE) 2395 return -EINVAL; 2396 2397 if (jme->mii_if.force_media && 2398 ecmd->autoneg != AUTONEG_ENABLE && 2399 (jme->mii_if.full_duplex != ecmd->duplex)) 2400 fdc = 1; 2401 2402 spin_lock_bh(&jme->phy_lock); 2403 rc = mii_ethtool_sset(&(jme->mii_if), ecmd); 2404 spin_unlock_bh(&jme->phy_lock); 2405 2406 if (!rc && fdc) 2407 jme_reset_link(jme); 2408 2409 if (!rc) { 2410 set_bit(JME_FLAG_SSET, &jme->flags); 2411 jme->old_ecmd = *ecmd; 2412 } 2413 2414 return rc; 2415} 2416 2417static u32 2418jme_get_link(struct net_device *netdev) 2419{ 2420 struct jme_adapter *jme = netdev_priv(netdev); 2421 return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; 2422} 2423 2424static u32 2425jme_get_msglevel(struct net_device *netdev) 2426{ 2427 struct jme_adapter *jme = netdev_priv(netdev); 2428 return jme->msg_enable; 2429} 2430 2431static void 2432jme_set_msglevel(struct net_device *netdev, u32 value) 2433{ 2434 struct jme_adapter *jme = netdev_priv(netdev); 2435 jme->msg_enable = value; 2436} 2437 2438static u32 2439jme_get_rx_csum(struct net_device *netdev) 2440{ 2441 struct jme_adapter *jme = netdev_priv(netdev); 2442 return jme->reg_rxmcs & RXMCS_CHECKSUM; 2443} 2444 2445static int 2446jme_set_rx_csum(struct net_device *netdev, u32 on) 2447{ 2448 struct jme_adapter *jme = netdev_priv(netdev); 2449 2450 spin_lock_bh(&jme->rxmcs_lock); 2451 if (on) 2452 jme->reg_rxmcs |= RXMCS_CHECKSUM; 2453 else 2454 jme->reg_rxmcs &= ~RXMCS_CHECKSUM; 2455 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); 2456 spin_unlock_bh(&jme->rxmcs_lock); 2457 2458 return 0; 2459} 2460 2461static int 2462jme_set_tx_csum(struct net_device *netdev, u32 on) 2463{ 2464 struct jme_adapter *jme = netdev_priv(netdev); 2465 2466 if (on) { 2467 set_bit(JME_FLAG_TXCSUM, &jme->flags); 2468 if (netdev->mtu <= 1900) 2469 netdev->features |= NETIF_F_HW_CSUM; 2470 
} else { 2471 clear_bit(JME_FLAG_TXCSUM, &jme->flags); 2472 netdev->features &= ~NETIF_F_HW_CSUM; 2473 } 2474 2475 return 0; 2476} 2477 2478static int 2479jme_set_tso(struct net_device *netdev, u32 on) 2480{ 2481 struct jme_adapter *jme = netdev_priv(netdev); 2482 2483 if (on) { 2484 set_bit(JME_FLAG_TSO, &jme->flags); 2485 if (netdev->mtu <= 1900) 2486 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; 2487 } else { 2488 clear_bit(JME_FLAG_TSO, &jme->flags); 2489 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 2490 } 2491 2492 return 0; 2493} 2494 2495static int 2496jme_nway_reset(struct net_device *netdev) 2497{ 2498 struct jme_adapter *jme = netdev_priv(netdev); 2499 jme_restart_an(jme); 2500 return 0; 2501} 2502 2503static u8 2504jme_smb_read(struct jme_adapter *jme, unsigned int addr) 2505{ 2506 u32 val; 2507 int to; 2508 2509 val = jread32(jme, JME_SMBCSR); 2510 to = JME_SMB_BUSY_TIMEOUT; 2511 while ((val & SMBCSR_BUSY) && --to) { 2512 msleep(1); 2513 val = jread32(jme, JME_SMBCSR); 2514 } 2515 if (!to) { 2516 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2517 return 0xFF; 2518 } 2519 2520 jwrite32(jme, JME_SMBINTF, 2521 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | 2522 SMBINTF_HWRWN_READ | 2523 SMBINTF_HWCMD); 2524 2525 val = jread32(jme, JME_SMBINTF); 2526 to = JME_SMB_BUSY_TIMEOUT; 2527 while ((val & SMBINTF_HWCMD) && --to) { 2528 msleep(1); 2529 val = jread32(jme, JME_SMBINTF); 2530 } 2531 if (!to) { 2532 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2533 return 0xFF; 2534 } 2535 2536 return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; 2537} 2538 2539static void 2540jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) 2541{ 2542 u32 val; 2543 int to; 2544 2545 val = jread32(jme, JME_SMBCSR); 2546 to = JME_SMB_BUSY_TIMEOUT; 2547 while ((val & SMBCSR_BUSY) && --to) { 2548 msleep(1); 2549 val = jread32(jme, JME_SMBCSR); 2550 } 2551 if (!to) { 2552 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2553 return; 2554 } 2555 2556 
jwrite32(jme, JME_SMBINTF, 2557 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) | 2558 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | 2559 SMBINTF_HWRWN_WRITE | 2560 SMBINTF_HWCMD); 2561 2562 val = jread32(jme, JME_SMBINTF); 2563 to = JME_SMB_BUSY_TIMEOUT; 2564 while ((val & SMBINTF_HWCMD) && --to) { 2565 msleep(1); 2566 val = jread32(jme, JME_SMBINTF); 2567 } 2568 if (!to) { 2569 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2570 return; 2571 } 2572 2573 mdelay(2); 2574} 2575 2576static int 2577jme_get_eeprom_len(struct net_device *netdev) 2578{ 2579 struct jme_adapter *jme = netdev_priv(netdev); 2580 u32 val; 2581 val = jread32(jme, JME_SMBCSR); 2582 return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; 2583} 2584 2585static int 2586jme_get_eeprom(struct net_device *netdev, 2587 struct ethtool_eeprom *eeprom, u8 *data) 2588{ 2589 struct jme_adapter *jme = netdev_priv(netdev); 2590 int i, offset = eeprom->offset, len = eeprom->len; 2591 2592 /* 2593 * ethtool will check the boundary for us 2594 */ 2595 eeprom->magic = JME_EEPROM_MAGIC; 2596 for (i = 0 ; i < len ; ++i) 2597 data[i] = jme_smb_read(jme, i + offset); 2598 2599 return 0; 2600} 2601 2602static int 2603jme_set_eeprom(struct net_device *netdev, 2604 struct ethtool_eeprom *eeprom, u8 *data) 2605{ 2606 struct jme_adapter *jme = netdev_priv(netdev); 2607 int i, offset = eeprom->offset, len = eeprom->len; 2608 2609 if (eeprom->magic != JME_EEPROM_MAGIC) 2610 return -EINVAL; 2611 2612 /* 2613 * ethtool will check the boundary for us 2614 */ 2615 for (i = 0 ; i < len ; ++i) 2616 jme_smb_write(jme, i + offset, data[i]); 2617 2618 return 0; 2619} 2620 2621static const struct ethtool_ops jme_ethtool_ops = { 2622 .get_drvinfo = jme_get_drvinfo, 2623 .get_regs_len = jme_get_regs_len, 2624 .get_regs = jme_get_regs, 2625 .get_coalesce = jme_get_coalesce, 2626 .set_coalesce = jme_set_coalesce, 2627 .get_pauseparam = jme_get_pauseparam, 2628 .set_pauseparam = jme_set_pauseparam, 2629 .get_wol = jme_get_wol, 2630 
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_msglevel		= jme_get_msglevel,
	.set_msglevel		= jme_set_msglevel,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.set_tso		= jme_set_tso,
	.set_sg			= ethtool_op_set_sg,
	.nway_reset		= jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
	.set_eeprom		= jme_set_eeprom,
};

/*
 * Pick the widest usable DMA mask for the device.
 *
 * JMC250 parts are tried at 64 bit, then 40 bit; everything falls back
 * to 32 bit.  Returns 1 when a >32-bit mask was accepted (the caller
 * then sets NETIF_F_HIGHDMA), 0 for plain 32-bit DMA, and -1 if no
 * mask could be set at all.
 */
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			return 1;

	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return 0;

	return -1;
}

/*
 * One-time PHY tweak: set bit 12 of vendor-specific PHY register 26.
 * NOTE(review): the meaning of this bit is not visible here -- it is
 * presumably an errata/FIFO workaround; confirm against the PHY
 * datasheet.  Only called for non-FPGA parts (see jme_init_one()).
 */
static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

/*
 * Latch the FPGA version and chip revision out of JME_CHIPMODE into
 * the adapter struct.  Per the comment at its call site, this must run
 * before jme_reset_mac_processor().
 */
static inline void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
}

/* net_device entry points for this driver. */
static const struct net_device_ops jme_netdev_ops = {
	.ndo_open		= jme_open,
	.ndo_stop		= jme_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= jme_start_xmit,
	.ndo_set_mac_address	= jme_set_macaddr,
	.ndo_set_multicast_list	= jme_set_multi,
	.ndo_change_mtu		= jme_change_mtu,
	.ndo_tx_timeout		= jme_tx_timeout,
	.ndo_vlan_rx_register	= jme_vlan_rx_register,
};

/*
 * PCI probe: bring up one JMC250/JMC260 device.
 *
 * Enables the PCI device, picks a DMA mask, maps BAR 0, allocates and
 * initializes the net_device/adapter state, probes the PHY, reloads
 * the MAC address from EEPROM and finally registers the netdev.
 * Returns 0 on success or a negative errno, unwinding through the
 * err_out_* labels (continued on the following source lines).
 */
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		jeprintk(pdev, "Cannot enable PCI device.\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	/* The register window (BAR 0) must be memory-mapped I/O. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		jeprintk(pdev, "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		jeprintk(pdev, "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		jeprintk(pdev, "Cannot allocate netdev structure.\n");
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->netdev_ops = &jme_netdev_ops;
	netdev->ethtool_ops = &jme_ethtool_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->features = NETIF_F_HW_CSUM |
				NETIF_F_SG |
				NETIF_F_TSO |
				NETIF_F_TSO6 |
				NETIF_F_HW_VLAN_TX |
				NETIF_F_HW_VLAN_RX;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	/* Default (non-NAPI-poll) receive paths. */
	jme->jme_rx = netif_rx;
	jme->jme_vlan_rx = vlan_hwaccel_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	/* Ring geometry: power-of-two sizes so the index masks work. */
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		jeprintk(pdev, "Mapping PCI resource region error.\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}

	/* Honour the pseudo hot-plug module parameters (APMC register). */
	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	}

	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	/*
	 * Path-enable counters; link_changing is dec/inc'd across
	 * suspend/resume (see jme_suspend()/jme_resume()).
	 */
	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_init(&jme->pcc_task,
		     jme_pcc_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->linkch_task,
		     jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     jme_rx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxempty_task,
		     jme_rx_empty_tasklet,
		     (unsigned long) jme);
	/* Keep these tasklets parked until explicitly enabled later. */
	tasklet_disable_nosync(&jme->linkch_task);
	tasklet_disable_nosync(&jme->txclean_task);
	tasklet_disable_nosync(&jme->rxclean_task);
	tasklet_disable_nosync(&jme->rxempty_task);
	jme->dpi.cur = PCC_P1;

	/* Shadow copies of hardware registers, applied to silicon later. */
	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = PMCS_MFEN;
	/* Offloads default to on (matches the feature bits set above). */
	set_bit(JME_FLAG_TXCSUM, &jme->flags);
	set_bit(JME_FLAG_TSO, &jme->flags);

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
	jme->mrrs &= PCI_DCSR_MRRS_MASK;
	switch (jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		/* Larger MRRS values all map to the 512B DMA size. */
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Must check before reset_mac_processor
	 */
	jme_check_hw_ver(jme);
	jme->mii_if.dev = netdev;
	if (jme->fpgaver) {
		/* FPGA builds: scan MDIO addresses 1..31 for a live PHY. */
		jme->mii_if.phy_id = 0;
		for (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			/* 0xFFFF == no device; all-zero regs == dead PHY. */
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			jeprintk(pdev, "Can not find phy_id.\n");
			goto err_out_unmap;
		}

		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		/* Production silicon: PHY is always at address 1. */
		jme->mii_if.phy_id = 1;
	}
	/* Only the JMC250 is gigabit-capable. */
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;
	else
		jme->mii_if.supports_gmii = false;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme_clear_pm(jme);
	jme_set_phyfifoa(jme);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
	if (!jme->fpgaver)
		jme_phy_init(jme);
	/* PHY stays off until the interface is opened. */
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		jeprintk(pdev,
			"Reload eeprom for reading MAC Address error.\n");
		goto err_out_unmap;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if (rc) {
		jeprintk(pdev, "Cannot register net device.\n");
		goto err_out_unmap;
	}

	netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
		   "JMC250 Gigabit Ethernet" :
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
		   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
		   jme->rev, netdev->dev_addr);

	return 0;

	/* Error unwinding: release in strict reverse order of acquisition. */
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

/*
 * PCI remove: tear down everything jme_init_one() set up,
 * in reverse order.
 */
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}

#ifdef CONFIG_PM
/*
 * Legacy PCI suspend hook: quiesce the device and arm wake-on-LAN.
 *
 * Stops the queue and IRQs, parks the cleanup tasklets, and -- if the
 * link was up -- tears down the RX/TX engines and frees the rings.
 * The wake-up half (PMCS programming, D3cold entry) follows on the
 * next source lines.
 */
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	/* Block the link-change path while suspending
	 * (re-incremented in jme_resume()). */
	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	pci_save_state(pdev);
	if (jme->reg_pmcs) {
		/* Wake events configured: keep the PHY alive at 100M half
		 * so the MAC can still see wake traffic in D3. */
		jme_set_100m_half(jme);

		/* Link-status wake needs an established link first. */
		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
			jme_wait_link(jme);

		jwrite32(jme, JME_PMCS, jme->reg_pmcs);

		pci_enable_wake(pdev, PCI_D3cold, true);
	} else {
		/* No wake events: power the PHY down entirely. */
		jme_phy_off(jme);
	}
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

/*
 * Legacy PCI resume hook: undo jme_suspend().
 *
 * Clears power-management state, restores PCI config space, re-applies
 * the user's saved link settings (if JME_FLAG_SSET) or resets the PHY,
 * then re-enables IRQs and kicks a link re-check.
 */
static int
jme_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_clear_pm(jme);
	pci_restore_state(pdev);

	if (test_bit(JME_FLAG_SSET, &jme->flags)) {
		jme_phy_on(jme);
		jme_set_settings(netdev, &jme->old_ecmd);
	} else {
		jme_reset_phy_processor(jme);
	}

	jme_start_irq(jme);
	netif_device_attach(netdev);

	/* Re-allow the link-change path (paired with dec in suspend). */
	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	return 0;
}
#endif

/* PCI IDs this driver binds to. */
static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};

static struct pci_driver jme_driver = {
	.name		= DRV_NAME,
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend	= jme_suspend,
	.resume		= jme_resume,
#endif /* CONFIG_PM */
};

/* Module load: announce the driver and register it with the PCI core. */
static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC2XX ethernet "
		"driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

/* Module unload: unregister; the PCI core then calls jme_remove_one()
 * for every bound device. */
static
void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);