/*
 * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 *
 * This driver is designed for the Broadcom SiByte SOC built-in
 * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
 *
 * Updated to the driver model and the PHY abstraction layer
 * by Maciej W. Rozycki.
 */

#include <linux/bug.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */

/* Operational parameters that usually are not changed. */

/* Enable interrupt coalescing on the DMA channels (see sbdma_initctx). */
#define CONFIG_SBMAC_COALESCE

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)


MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");

/* A few user-configurable values which may be modified when a driver
   module is loaded. */

/* 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug messages");

#ifdef CONFIG_SBMAC_COALESCE
static int int_pktcnt_tx = 255;
module_param(int_pktcnt_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");

static int int_timeout_tx = 255;
module_param(int_timeout_tx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");

static int int_pktcnt_rx = 64;
module_param(int_pktcnt_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");

static int int_timeout_rx = 64;
module_param(int_timeout_rx, int, S_IRUGO);
MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#endif

#include <asm/sibyte/board.h>
#include <asm/sibyte/sb1250.h>
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#else
#error invalid SiByte MAC configuation
#endif
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250_mac.h>
#include <asm/sibyte/sb1250_dma.h>

/* Map a MAC unit number to its interrupt source.  The BCM1480 family
   interleaves two interrupt numbers per MAC. */
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#define UNIT_INT(n)		(K_BCM1480_INT_MAC_0 + ((n) * 2))
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#define UNIT_INT(n)		(K_INT_MAC_0 + (n))
#else
#error invalid SiByte MAC configuation
#endif

/* Use the dedicated PHY interrupt when the platform provides one,
   otherwise fall back to phylib polling. */
#ifdef K_INT_PHY
#define SBMAC_PHY_INT			K_INT_PHY
#else
#define SBMAC_PHY_INT			PHY_POLL
#endif

/**********************************************************************
 *  Simple types
 ********************************************************************* */

enum sbmac_speed {
	sbmac_speed_none = 0,
	sbmac_speed_10 = SPEED_10,
	sbmac_speed_100 = SPEED_100,
	sbmac_speed_1000 = SPEED_1000,
};

enum sbmac_duplex {
	sbmac_duplex_none = -1,
	sbmac_duplex_half = DUPLEX_HALF,
	sbmac_duplex_full = DUPLEX_FULL,
};

enum sbmac_fc {
	sbmac_fc_none,
	sbmac_fc_disabled,
	sbmac_fc_frame,
	sbmac_fc_collision,
	sbmac_fc_carrier,
};

enum sbmac_state {
	sbmac_state_uninit,
	sbmac_state_off,
	sbmac_state_on,
	sbmac_state_broken,
};


/**********************************************************************
 *  Macros
 ********************************************************************* */

/* Next descriptor in ring order, wrapping from the last entry back to
   the table base.  'f' names the ring-pointer field of 'd' to advance. */
#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
			  (d)->sbdma_dscrtable : (d)->f+1)


/* Number of cache lines spanned by x bytes (rounded up). */
#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)

#define SBMAC_MAX_TXDESCR	256
#define SBMAC_MAX_RXDESCR	256

#define ETHER_ADDR_LEN		6
#define ENET_PACKET_SIZE	1518
/*#define ENET_PACKET_SIZE	9216 */

/**********************************************************************
 *  DMA Descriptor structure
 ********************************************************************* */

/* One hardware DMA descriptor: two 64-bit words whose layout is
   defined by the SiByte DMA engine (see sb1250_dma.h field macros). */
struct sbdmadscr {
	uint64_t  dscr_a;
	uint64_t  dscr_b;
};

/**********************************************************************
 *  DMA Controller structure
 ********************************************************************* */

struct sbmacdma {

	/*
	 * This stuff is used to identify the channel and the registers
	 * associated with it.
	 */
	struct sbmac_softc	*sbdma_eth;	/* back pointer to associated
						   MAC */
	int			sbdma_channel;	/* channel number */
	int			sbdma_txdir;	/* direction (1=transmit) */
	int			sbdma_maxdescr;	/* total # of descriptors
						   in ring */
#ifdef CONFIG_SBMAC_COALESCE
	int			sbdma_int_pktcnt;
						/* # descriptors rx/tx
						   before interrupt */
	int			sbdma_int_timeout;
						/* # usec rx/tx interrupt */
#endif
	void __iomem		*sbdma_config0;	/* DMA config register 0 */
	void __iomem		*sbdma_config1;	/* DMA config register 1 */
	void __iomem		*sbdma_dscrbase;
						/* descriptor base address */
	void __iomem		*sbdma_dscrcnt;	/* descriptor count register */
	void __iomem		*sbdma_curdscr;	/* current descriptor
						   address */
	void __iomem		*sbdma_oodpktlost;
						/* pkt drop (rx only) */

	/*
	 * This stuff is for maintenance of the ring
	 */
	void			*sbdma_dscrtable_unaligned;
	struct sbdmadscr	*sbdma_dscrtable;
						/* base of descriptor table */
	struct sbdmadscr	*sbdma_dscrtable_end;
						/* end of descriptor table */
	struct sk_buff		**sbdma_ctxtable;
						/* context table, one
						   per descr */
	dma_addr_t		sbdma_dscrtable_phys;
						/* and also the phys addr */
	struct sbdmadscr	*sbdma_addptr;	/* next dscr for sw to add */
	struct sbdmadscr	*sbdma_remptr;	/* next dscr for sw
						   to remove */
};


/**********************************************************************
 *  Ethernet softc structure
 ********************************************************************* */

struct sbmac_softc {

	/*
	 * Linux-specific things
	 */
	struct net_device	*sbm_dev;	/* pointer to linux device */
	struct napi_struct	napi;
	struct phy_device	*phy_dev;	/* the associated PHY device */
	struct mii_bus		*mii_bus;	/* the MII bus */
	int			phy_irq[PHY_MAX_ADDR];
	spinlock_t		sbm_lock;	/* spin lock */
	int			sbm_devflags;	/* current device flags */

	/*
	 * Controller-specific things
	 */
	void __iomem		*sbm_base;	/* MAC's base address */
	enum sbmac_state	sbm_state;	/* current state */

	void __iomem		*sbm_macenable;	/* MAC Enable Register */
	void __iomem		*sbm_maccfg;	/* MAC Config Register */
	void __iomem		*sbm_fifocfg;	/* FIFO Config Register */
	void __iomem		*sbm_framecfg;	/* Frame Config Register */
	void __iomem		*sbm_rxfilter;	/* Receive Filter Register */
	void __iomem		*sbm_isr;	/* Interrupt Status Register */
	void __iomem		*sbm_imr;	/* Interrupt Mask Register */
	void __iomem		*sbm_mdio;	/* MDIO Register */

	enum sbmac_speed	sbm_speed;	/* current speed */
	enum sbmac_duplex	sbm_duplex;	/* current duplex */
	enum sbmac_fc		sbm_fc;		/* cur. flow control setting */
	int			sbm_pause;	/* current pause setting */
	int			sbm_link;	/* current link state */

	unsigned char		sbm_hwaddr[ETHER_ADDR_LEN];

	struct sbmacdma		sbm_txdma;	/* only channel 0 for now */
	struct sbmacdma		sbm_rxdma;
	int			rx_hw_checksum;
	int			sbe_idx;
};


/**********************************************************************
 *  Externs
 ********************************************************************* */

/**********************************************************************
 *  Prototypes
 ********************************************************************* */

static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
			  int txrx, int maxdescr);
static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
			       struct sk_buff *m);
static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
static void sbdma_emptyring(struct sbmacdma *d);
static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			    int work_to_do, int poll);
static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			     int poll);
static int sbmac_initctx(struct sbmac_softc *s);
static void sbmac_channel_start(struct sbmac_softc *s);
static void sbmac_channel_stop(struct sbmac_softc *s);
static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
						enum sbmac_state);
static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
static uint64_t sbmac_addr2reg(unsigned char *ptr);
static irqreturn_t sbmac_intr(int irq, void *dev_instance);
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
static void sbmac_setmulti(struct sbmac_softc *sc);
static int sbmac_init(struct platform_device *pldev, long long base);
static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
			    enum sbmac_fc fc);

static int sbmac_open(struct net_device *dev);
static void sbmac_tx_timeout (struct net_device *dev);
static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
static int sbmac_poll(struct napi_struct *napi, int budget);

static void sbmac_mii_poll(struct net_device *dev);
static int sbmac_mii_probe(struct net_device *dev);

static void sbmac_mii_sync(void __iomem *sbm_mdio);
static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
			       int bitcnt);
static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx);
static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
			   u16 val);


/**********************************************************************
 *  Globals
 ********************************************************************* */

static char sbmac_string[] = "sb1250-mac";

static char sbmac_mdio_string[] = "sb1250-mac-mdio";

/**********************************************************************
 *  MDIO constants
 ********************************************************************* */

#define	MII_COMMAND_START	0x01
#define	MII_COMMAND_READ	0x02
#define	MII_COMMAND_WRITE	0x01
#define	MII_COMMAND_ACK		0x02

#define M_MAC_MDIO_DIR_OUTPUT	0		/* for clarity */

#define ENABLE 		1
#define DISABLE		0

/**********************************************************************
 *  SBMAC_MII_SYNC(sbm_mdio)
 *
 *  Synchronize with the MII - send a pattern of bits to the MII
 *  that will guarantee that it is ready to accept a command.
 *  (Clocks out 32 '1' bits with MDIO held as an output.)
 *
 *  Input parameters:
 *  	   sbm_mdio - address of the MAC's MDIO register
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbmac_mii_sync(void __iomem *sbm_mdio)
{
	int cnt;
	uint64_t bits;
	int mac_mdio_genc;

	/* Preserve the GENC bit while bit-banging the other fields. */
	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;

	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);

	for (cnt = 0; cnt < 32; cnt++) {
		/* One full MDC clock cycle per iteration: raise, then drop. */
		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
	}
}

/**********************************************************************
 *  SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
 *
 *  Send some bits to the MII.  The bits to be sent are right-
 *  justified in the 'data' parameter.
 *
 *  Input parameters:
 *  	   sbm_mdio - address of the MAC's MDIO register
 *  	   data     - data to send
 *  	   bitcnt   - number of bits to send
 ********************************************************************* */

static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
			       int bitcnt)
{
	int i;
	uint64_t bits;
	unsigned int curmask;
	int mac_mdio_genc;

	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	bits = M_MAC_MDIO_DIR_OUTPUT;
	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);

	/* MSB first: start at the highest requested bit. */
	curmask = 1 << (bitcnt - 1);

	for (i = 0; i < bitcnt; i++) {
		if (data & curmask)
			bits |= M_MAC_MDIO_OUT;
		else bits &= ~M_MAC_MDIO_OUT;
		/* Present the data bit, clock MDC high, then low. */
		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
		curmask >>= 1;
	}
}



/**********************************************************************
 *  SBMAC_MII_READ(bus, phyaddr, regidx)
 *  Read a PHY register.
 *
 *  Input parameters:
 *  	   bus     - MDIO bus handle
 *  	   phyaddr - PHY's address
 *  	   regnum  - index of register to read
 *
 *  Return value:
 *  	   value read, or 0xffff if an error occurred.
 ********************************************************************* */

static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
	void __iomem *sbm_mdio = sc->sbm_mdio;
	int idx;
	int error;
	int regval;
	int mac_mdio_genc;

	/*
	 * Synchronize ourselves so that the PHY knows the next
	 * thing coming down is a command
	 */
	sbmac_mii_sync(sbm_mdio);

	/*
	 * Send the data to the PHY.  The sequence is
	 * a "start" command (2 bits)
	 * a "read" command (2 bits)
	 * the PHY addr (5 bits)
	 * the register index (5 bits)
	 */
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
	sbmac_mii_senddata(sbm_mdio, regidx, 5);

	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	/*
	 * Switch the port around without a clock transition.
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

	/*
	 * Send out a clock pulse to signal we want the status
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
		     sbm_mdio);
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

	/*
	 * If an error occurred, the PHY will signal '1' back
	 */
	error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;

	/*
	 * Issue an 'idle' clock pulse, but keep the direction
	 * the same.
	 */
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
		     sbm_mdio);
	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);

	regval = 0;

	/* Shift in the 16 data bits, MSB first, one per clock pulse. */
	for (idx = 0; idx < 16; idx++) {
		regval <<= 1;

		if (error == 0) {
			if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
				regval |= 1;
		}

		__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
			     sbm_mdio);
		__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
	}

	/* Switch back to output */
	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);

	if (error == 0)
		return regval;
	return 0xffff;
}


/**********************************************************************
 *  SBMAC_MII_WRITE(bus, phyaddr, regidx, regval)
 *
 *  Write a value to a PHY register.
 *
 *  Input parameters:
 *  	   bus     - MDIO bus handle
 *  	   phyaddr - PHY to use
 *  	   regidx  - register within the PHY
 *  	   regval  - data to write to register
 *
 *  Return value:
 *  	   0 for success
 ********************************************************************* */

static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
			   u16 regval)
{
	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
	void __iomem *sbm_mdio = sc->sbm_mdio;
	int mac_mdio_genc;

	sbmac_mii_sync(sbm_mdio);

	/* Frame: start, write opcode, PHY address, register, turnaround,
	   then the 16 data bits. */
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2);
	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
	sbmac_mii_senddata(sbm_mdio, regidx, 5);
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2);
	sbmac_mii_senddata(sbm_mdio, regval, 16);

	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);

	return 0;
}



/**********************************************************************
 *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
 *
 *  Initialize a DMA channel context.  Since there are potentially
 *  eight DMA channels per MAC, it's nice to do this in a standard
 *  way.
 *
 *  Input parameters:
 *  	   d - struct sbmacdma (DMA channel context)
 *  	   s - struct sbmac_softc (pointer to a MAC)
 *  	   chan - channel number (0..1 right now)
 *  	   txrx - Identifies DMA_TX or DMA_RX for channel direction
 *      maxdescr - number of descriptors
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
			  int txrx, int maxdescr)
{
#ifdef CONFIG_SBMAC_COALESCE
	int int_pktcnt, int_timeout;
#endif

	/*
	 * Save away interesting stuff in the structure
	 */

	d->sbdma_eth       = s;
	d->sbdma_channel   = chan;
	d->sbdma_txdir     = txrx;

	/* Clear all of the MAC's RMON statistics counters. */
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR);
	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR);

	/*
	 * initialize register pointers
	 */

	d->sbdma_config0 =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
	d->sbdma_config1 =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
	d->sbdma_dscrbase =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
	d->sbdma_dscrcnt =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
	d->sbdma_curdscr =
		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
	if (d->sbdma_txdir)
		d->sbdma_oodpktlost = NULL;	/* out-of-descriptor drops are RX-only */
	else
		d->sbdma_oodpktlost =
			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);

	/*
	 * Allocate memory for the ring
	 */

	d->sbdma_maxdescr = maxdescr;

	/* NOTE(review): the kcalloc results below are not checked for NULL;
	   a failed allocation would be dereferenced later.  Verify callers
	   only invoke this at probe time where failure should abort. */
	d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1,
					       sizeof(*d->sbdma_dscrtable),
					       GFP_KERNEL);

	/*
	 * The descriptor table must be aligned to at least 16 bytes or the
	 * MAC will corrupt it.
	 */
	d->sbdma_dscrtable = (struct sbdmadscr *)
			     ALIGN((unsigned long)d->sbdma_dscrtable_unaligned,
				   sizeof(*d->sbdma_dscrtable));

	d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;

	d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);

	/*
	 * And context table
	 */

	d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr,
				    sizeof(*d->sbdma_ctxtable), GFP_KERNEL);

#ifdef CONFIG_SBMAC_COALESCE
	/*
	 * Setup Rx/Tx DMA coalescing defaults
	 */

	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
	if ( int_pktcnt ) {
		d->sbdma_int_pktcnt = int_pktcnt;
	} else {
		d->sbdma_int_pktcnt = 1;
	}

	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
	if ( int_timeout ) {
		d->sbdma_int_timeout = int_timeout;
	} else {
		d->sbdma_int_timeout = 0;
	}
#endif

}

/**********************************************************************
 *  SBDMA_CHANNEL_START(d)
 *
 *  Initialize the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *  	   d - DMA channel to init (context must be previously init'd
 *         rxtx - DMA_RX or DMA_TX depending on what type of channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
{
	/*
	 * Turn on the DMA channel
	 */

#ifdef CONFIG_SBMAC_COALESCE
	/* config1 holds the interrupt timeout; config0 the ring size,
	   per-packet-count interrupt threshold and EOP interrupt enable. */
	__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
		     0, d->sbdma_config1);
	__raw_writeq(M_DMA_EOP_INT_EN |
		     V_DMA_RINGSZ(d->sbdma_maxdescr) |
		     V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
		     0, d->sbdma_config0);
#else
	__raw_writeq(0, d->sbdma_config1);
	__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
		     0, d->sbdma_config0);
#endif

	__raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);

	/*
	 * Initialize ring pointers
	 */

	d->sbdma_addptr = d->sbdma_dscrtable;
	d->sbdma_remptr = d->sbdma_dscrtable;
}

/**********************************************************************
 *  SBDMA_CHANNEL_STOP(d)
 *
 *  Shut down the hardware registers for a DMA channel.
 *
 *  Input parameters:
 *  	   d - DMA channel to stop
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_channel_stop(struct sbmacdma *d)
{
	/*
	 * Turn off the DMA channel
	 */

	__raw_writeq(0, d->sbdma_config1);

	__raw_writeq(0, d->sbdma_dscrbase);

	__raw_writeq(0, d->sbdma_config0);

	/*
	 * Zero ring pointers
	 */

	d->sbdma_addptr = NULL;
	d->sbdma_remptr = NULL;
}

/* Align skb->data to 'power2' bytes, then reserve 'offset' further bytes
   (used to cache-line align RX buffers while keeping IP headers aligned). */
static inline void sbdma_align_skb(struct sk_buff *skb,
				   unsigned int power2, unsigned int offset)
{
	unsigned char *addr = skb->data;
	unsigned char *newaddr = PTR_ALIGN(addr, power2);

	skb_reserve(skb, newaddr - addr + offset);
}


/**********************************************************************
 *  SBDMA_ADD_RCVBUFFER(d,sb)
 *
 *  Add a buffer to the specified DMA channel.   For receive channels,
 *  this queues a buffer for inbound packets.
 *
 *  Input parameters:
 *	   sc - softc structure
 *  	   d - DMA channel descriptor
 * 	   sb - sk_buff to add, or NULL if we should allocate one
 *
 *  Return value:
 *  	   0 if buffer added successfully
 *  	   -ENOSPC if the ring is full
 *  	   -ENOBUFS if an sk_buff could not be allocated
 ********************************************************************* */


static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
			       struct sk_buff *sb)
{
	struct net_device *dev = sc->sbm_dev;
	struct sbdmadscr *dsc;
	struct sbdmadscr *nextdsc;
	struct sk_buff *sb_new = NULL;
	int pktsize = ENET_PACKET_SIZE;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_addptr;
	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_remptr) {
		return -ENOSPC;
	}

	/*
	 * Allocate a sk_buff if we don't already have one.
	 * If we do have an sk_buff, reset it so that it's empty.
	 *
	 * Note: sk_buffs don't seem to be guaranteed to have any sort
	 * of alignment when they are allocated.  Therefore, allocate enough
	 * extra space to make sure that:
	 *
	 *    1. the data does not start in the middle of a cache line.
	 *    2. The data does not end in the middle of a cache line
	 *    3. The buffer can be aligned such that the IP addresses are
	 *       naturally aligned.
	 *
	 *  Remember, the SOCs MAC writes whole cache lines at a time,
	 *  without reading the old contents first.  So, if the sk_buff's
	 *  data portion starts in the middle of a cache line, the SOC
	 *  DMA will trash the beginning (and ending) portions.
	 */

	if (sb == NULL) {
		sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
					       SMP_CACHE_BYTES * 2 +
					       NET_IP_ALIGN);
		if (sb_new == NULL) {
			pr_info("%s: sk_buff allocation failed\n",
			       d->sbdma_eth->sbm_dev->name);
			return -ENOBUFS;
		}

		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
	}
	else {
		sb_new = sb;
		/*
		 * nothing special to reinit buffer, it's already aligned
		 * and sb->data already points to a good place.
		 */
	}

	/*
	 * fill in the descriptor
	 */

#ifdef CONFIG_SBMAC_COALESCE
	/*
	 * Do not interrupt per DMA transfer.
	 */
	dsc->dscr_a = virt_to_phys(sb_new->data) |
		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
#else
	dsc->dscr_a = virt_to_phys(sb_new->data) |
		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
		M_DMA_DSCRA_INTERRUPT;
#endif

	/* receiving: no options */
	dsc->dscr_b = 0;

	/*
	 * fill in the context
	 */

	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;

	/*
	 * point at next packet
	 */

	d->sbdma_addptr = nextdsc;

	/*
	 * Give the buffer to the DMA engine.
	 */

	__raw_writeq(1, d->sbdma_dscrcnt);

	return 0;					/* we did it */
}

/**********************************************************************
 *  SBDMA_ADD_TXBUFFER(d,sb)
 *
 *  Add a transmit buffer to the specified DMA channel, causing a
 *  transmit to start.
 *
 *  Input parameters:
 *  	   d - DMA channel descriptor
 * 	   sb - sk_buff to add
 *
 *  Return value:
 *  	   0 transmit queued successfully
 *  	   otherwise error code
 ********************************************************************* */


static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb)
{
	struct sbdmadscr *dsc;
	struct sbdmadscr *nextdsc;
	uint64_t phys;
	uint64_t ncb;
	int length;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_addptr;
	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_remptr) {
		return -ENOSPC;
	}

	/*
	 * Under Linux, it's not necessary to copy/coalesce buffers
	 * like it is on NetBSD.  We think they're all contiguous,
	 * but that may not be true for GBE.
	 */

	length = sb->len;

	/*
	 * fill in the descriptor.  Note that the number of cache
	 * blocks in the descriptor is the number of blocks
	 * *spanned*, so we need to add in the offset (if any)
	 * while doing the calculation.
	 */

	phys = virt_to_phys(sb->data);
	ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));

	dsc->dscr_a = phys |
		V_DMA_DSCRA_A_SIZE(ncb) |
#ifndef CONFIG_SBMAC_COALESCE
		M_DMA_DSCRA_INTERRUPT |
#endif
		M_DMA_ETHTX_SOP;

	/* transmitting: set outbound options and length */

	dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
		V_DMA_DSCRB_PKT_SIZE(length);

	/*
	 * fill in the context
	 */

	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;

	/*
	 * point at next packet
	 */

	d->sbdma_addptr = nextdsc;

	/*
	 * Give the buffer to the DMA engine.
	 */

	__raw_writeq(1, d->sbdma_dscrcnt);

	return 0;					/* we did it */
}




/**********************************************************************
 *  SBDMA_EMPTYRING(d)
 *
 *  Free all allocated sk_buffs on the specified DMA channel;
 *
 *  Input parameters:
 *  	   d  - DMA channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_emptyring(struct sbmacdma *d)
{
	int idx;
	struct sk_buff *sb;

	for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
		sb = d->sbdma_ctxtable[idx];
		if (sb) {
			dev_kfree_skb(sb);
			d->sbdma_ctxtable[idx] = NULL;
		}
	}
}


/**********************************************************************
 *  SBDMA_FILLRING(d)
 *
 *  Fill the specified DMA channel (must be receive channel)
 *  with sk_buffs
 *
 *  Input parameters:
 *	   sc - softc structure
 *  	   d - DMA channel
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */

static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
{
	int idx;

	/* One slot is left empty to distinguish a full ring from an
	   empty one (add/rem pointers equal means empty). */
	for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
		if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
			break;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: mask MAC interrupts, run the ISR by hand, then restore
   the interrupt mask (coalesced or per-channel, to match channel setup). */
static void sbmac_netpoll(struct net_device *netdev)
{
	struct sbmac_softc *sc = netdev_priv(netdev);
	int irq = sc->sbm_dev->irq;

	__raw_writeq(0, sc->sbm_imr);

	sbmac_intr(irq, netdev);

#ifdef CONFIG_SBMAC_COALESCE
	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
	((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
	sc->sbm_imr);
#else
	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
	(M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
}
#endif

/**********************************************************************
 *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
 *
 *  Process "completed" receive buffers on the specified DMA channel.
 *
 *  Input parameters:
 *	   sc - softc structure
 *  	   d - DMA channel context
 *    work_to_do - no. of packets to process before enabling interrupt
 *                 again (for NAPI)
 *    poll - 1: using polling (for NAPI)
 *
 *  Return value:
 *  	   number of packets processed
 ********************************************************************* */

static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
			    int work_to_do, int poll)
{
	struct net_device *dev = sc->sbm_dev;
	int curidx;
	int hwidx;
	struct sbdmadscr *dsc;
	struct sk_buff *sb;
	int len;
	int work_done = 0;
	int dropped = 0;

	prefetch(d);

again:
	/* Check if the HW dropped any frames */
	dev->stats.rx_fifo_errors
	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);

	while (work_to_do-- > 0) {
		/*
		 * figure out where we are (as an index) and where
		 * the hardware is (also as an index)
		 *
		 * This could be done faster if (for example) the
		 * descriptor table was page-aligned and contiguous in
		 * both virtual and physical memory -- you could then
		 * just compare the low-order bits of the virtual address
		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
		 */

		dsc = d->sbdma_remptr;
		curidx = dsc - d->sbdma_dscrtable;

		prefetch(dsc);
		prefetch(&d->sbdma_ctxtable[curidx]);

		hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
			 d->sbdma_dscrtable_phys) /
			sizeof(*d->sbdma_dscrtable);

		/*
		 * If they're the same, that means we've processed all
		 * of the descriptors up to (but not including) the one that
		 * the hardware is working on right now.
		 */

		if (curidx == hwidx)
			goto done;

		/*
		 * Otherwise, get the packet's sk_buff ptr back
		 */

		sb = d->sbdma_ctxtable[curidx];
		d->sbdma_ctxtable[curidx] = NULL;

		/* Packet length from the descriptor, minus the 4-byte FCS. */
		len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;

		/*
		 * Check packet status.  If good, process it.
		 * If not, silently drop it and put it back on the
		 * receive ring.
		 */

		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {

			/*
			 * Add a new buffer to replace the old one.  If we fail
			 * to allocate a buffer, we're going to drop this
			 * packet and put it right back on the receive ring.
			 */

			if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
				     -ENOBUFS)) {
				dev->stats.rx_dropped++;
				/* Re-add old buffer */
				sbdma_add_rcvbuffer(sc, d, sb);
				/* No point in continuing at the moment */
				printk(KERN_ERR "dropped packet (1)\n");
				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
				goto done;
			} else {
				/*
				 * Set length into the packet
				 */
				skb_put(sb,len);

				/*
				 * Buffer has been replaced on the
				 * receive ring.  Pass the buffer to
				 * the kernel
				 */
				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
				/* Check hw IPv4/TCP checksum if supported */
				if (sc->rx_hw_checksum == ENABLE) {
					if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
					    !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
						sb->ip_summed = CHECKSUM_UNNECESSARY;
						/* don't need to set sb->csum */
					} else {
						sb->ip_summed = CHECKSUM_NONE;
					}
				}
				prefetch(sb->data);
				prefetch((const void *)(((char *)sb->data)+32));
				if (poll)
					dropped = netif_receive_skb(sb);
				else
					dropped = netif_rx(sb);

				if (dropped == NET_RX_DROP) {
					dev->stats.rx_dropped++;
					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
					goto done;
				}
				else {
					dev->stats.rx_bytes += len;
					dev->stats.rx_packets++;
				}
			}
		} else {
			/*
			 * Packet was mangled somehow.  Just drop it and
			 * put it back on the receive ring.
			 */
			dev->stats.rx_errors++;
			sbdma_add_rcvbuffer(sc, d, sb);
		}


		/*
		 * .. and advance to the next buffer.
		 */

		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
		work_done++;
	}
	if (!poll) {
		work_to_do = 32;
		goto again; /* collect fifo drop statistics again */
	}
done:
	return work_done;
}

/**********************************************************************
 *  SBDMA_TX_PROCESS(sc,d)
 *
 *  Process "completed" transmit buffers on the specified DMA channel.
 *  This is normally called within the interrupt service routine.
 *  Note that this isn't really ideal for priority channels, since
 *  it processes all of the packets on a given channel before
 *  returning.
1222 * 1223 * Input parameters: 1224 * sc - softc structure 1225 * d - DMA channel context 1226 * poll - 1: using polling (for NAPI) 1227 * 1228 * Return value: 1229 * nothing 1230 ********************************************************************* */ 1231 1232static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, 1233 int poll) 1234{ 1235 struct net_device *dev = sc->sbm_dev; 1236 int curidx; 1237 int hwidx; 1238 struct sbdmadscr *dsc; 1239 struct sk_buff *sb; 1240 unsigned long flags; 1241 int packets_handled = 0; 1242 1243 spin_lock_irqsave(&(sc->sbm_lock), flags); 1244 1245 if (d->sbdma_remptr == d->sbdma_addptr) 1246 goto end_unlock; 1247 1248 hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - 1249 d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); 1250 1251 for (;;) { 1252 /* 1253 * figure out where we are (as an index) and where 1254 * the hardware is (also as an index) 1255 * 1256 * This could be done faster if (for example) the 1257 * descriptor table was page-aligned and contiguous in 1258 * both virtual and physical memory -- you could then 1259 * just compare the low-order bits of the virtual address 1260 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) 1261 */ 1262 1263 curidx = d->sbdma_remptr - d->sbdma_dscrtable; 1264 1265 /* 1266 * If they're the same, that means we've processed all 1267 * of the descriptors up to (but not including) the one that 1268 * the hardware is working on right now. 1269 */ 1270 1271 if (curidx == hwidx) 1272 break; 1273 1274 /* 1275 * Otherwise, get the packet's sk_buff ptr back 1276 */ 1277 1278 dsc = &(d->sbdma_dscrtable[curidx]); 1279 sb = d->sbdma_ctxtable[curidx]; 1280 d->sbdma_ctxtable[curidx] = NULL; 1281 1282 /* 1283 * Stats 1284 */ 1285 1286 dev->stats.tx_bytes += sb->len; 1287 dev->stats.tx_packets++; 1288 1289 /* 1290 * for transmits, we just free buffers. 1291 */ 1292 1293 dev_kfree_skb_irq(sb); 1294 1295 /* 1296 * .. and advance to the next buffer. 
1297 */ 1298 1299 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); 1300 1301 packets_handled++; 1302 1303 } 1304 1305 /* 1306 * Decide if we should wake up the protocol or not. 1307 * Other drivers seem to do this when we reach a low 1308 * watermark on the transmit queue. 1309 */ 1310 1311 if (packets_handled) 1312 netif_wake_queue(d->sbdma_eth->sbm_dev); 1313 1314end_unlock: 1315 spin_unlock_irqrestore(&(sc->sbm_lock), flags); 1316 1317} 1318 1319 1320 1321/********************************************************************** 1322 * SBMAC_INITCTX(s) 1323 * 1324 * Initialize an Ethernet context structure - this is called 1325 * once per MAC on the 1250. Memory is allocated here, so don't 1326 * call it again from inside the ioctl routines that bring the 1327 * interface up/down 1328 * 1329 * Input parameters: 1330 * s - sbmac context structure 1331 * 1332 * Return value: 1333 * 0 1334 ********************************************************************* */ 1335 1336static int sbmac_initctx(struct sbmac_softc *s) 1337{ 1338 1339 /* 1340 * figure out the addresses of some ports 1341 */ 1342 1343 s->sbm_macenable = s->sbm_base + R_MAC_ENABLE; 1344 s->sbm_maccfg = s->sbm_base + R_MAC_CFG; 1345 s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG; 1346 s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG; 1347 s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG; 1348 s->sbm_isr = s->sbm_base + R_MAC_STATUS; 1349 s->sbm_imr = s->sbm_base + R_MAC_INT_MASK; 1350 s->sbm_mdio = s->sbm_base + R_MAC_MDIO; 1351 1352 /* 1353 * Initialize the DMA channels. Right now, only one per MAC is used 1354 * Note: Only do this _once_, as it allocates memory from the kernel! 
1355 */ 1356 1357 sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR); 1358 sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR); 1359 1360 /* 1361 * initial state is OFF 1362 */ 1363 1364 s->sbm_state = sbmac_state_off; 1365 1366 return 0; 1367} 1368 1369 1370static void sbdma_uninitctx(struct sbmacdma *d) 1371{ 1372 if (d->sbdma_dscrtable_unaligned) { 1373 kfree(d->sbdma_dscrtable_unaligned); 1374 d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; 1375 } 1376 1377 if (d->sbdma_ctxtable) { 1378 kfree(d->sbdma_ctxtable); 1379 d->sbdma_ctxtable = NULL; 1380 } 1381} 1382 1383 1384static void sbmac_uninitctx(struct sbmac_softc *sc) 1385{ 1386 sbdma_uninitctx(&(sc->sbm_txdma)); 1387 sbdma_uninitctx(&(sc->sbm_rxdma)); 1388} 1389 1390 1391/********************************************************************** 1392 * SBMAC_CHANNEL_START(s) 1393 * 1394 * Start packet processing on this MAC. 1395 * 1396 * Input parameters: 1397 * s - sbmac structure 1398 * 1399 * Return value: 1400 * nothing 1401 ********************************************************************* */ 1402 1403static void sbmac_channel_start(struct sbmac_softc *s) 1404{ 1405 uint64_t reg; 1406 void __iomem *port; 1407 uint64_t cfg,fifo,framecfg; 1408 int idx, th_value; 1409 1410 /* 1411 * Don't do this if running 1412 */ 1413 1414 if (s->sbm_state == sbmac_state_on) 1415 return; 1416 1417 /* 1418 * Bring the controller out of reset, but leave it off. 1419 */ 1420 1421 __raw_writeq(0, s->sbm_macenable); 1422 1423 /* 1424 * Ignore all received packets 1425 */ 1426 1427 __raw_writeq(0, s->sbm_rxfilter); 1428 1429 /* 1430 * Calculate values for various control registers. 
1431 */ 1432 1433 cfg = M_MAC_RETRY_EN | 1434 M_MAC_TX_HOLD_SOP_EN | 1435 V_MAC_TX_PAUSE_CNT_16K | 1436 M_MAC_AP_STAT_EN | 1437 M_MAC_FAST_SYNC | 1438 M_MAC_SS_EN | 1439 0; 1440 1441 /* 1442 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars 1443 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above 1444 * Use a larger RD_THRSH for gigabit 1445 */ 1446 if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) 1447 th_value = 28; 1448 else 1449 th_value = 64; 1450 1451 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ 1452 ((s->sbm_speed == sbmac_speed_1000) 1453 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) | 1454 V_MAC_TX_RL_THRSH(4) | 1455 V_MAC_RX_PL_THRSH(4) | 1456 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ 1457 V_MAC_RX_RL_THRSH(8) | 1458 0; 1459 1460 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT | 1461 V_MAC_MAX_FRAMESZ_DEFAULT | 1462 V_MAC_BACKOFF_SEL(1); 1463 1464 /* 1465 * Clear out the hash address map 1466 */ 1467 1468 port = s->sbm_base + R_MAC_HASH_BASE; 1469 for (idx = 0; idx < MAC_HASH_COUNT; idx++) { 1470 __raw_writeq(0, port); 1471 port += sizeof(uint64_t); 1472 } 1473 1474 /* 1475 * Clear out the exact-match table 1476 */ 1477 1478 port = s->sbm_base + R_MAC_ADDR_BASE; 1479 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { 1480 __raw_writeq(0, port); 1481 port += sizeof(uint64_t); 1482 } 1483 1484 /* 1485 * Clear out the DMA Channel mapping table registers 1486 */ 1487 1488 port = s->sbm_base + R_MAC_CHUP0_BASE; 1489 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { 1490 __raw_writeq(0, port); 1491 port += sizeof(uint64_t); 1492 } 1493 1494 1495 port = s->sbm_base + R_MAC_CHLO0_BASE; 1496 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { 1497 __raw_writeq(0, port); 1498 port += sizeof(uint64_t); 1499 } 1500 1501 /* 1502 * Program the hardware address. It goes into the hardware-address 1503 * register as well as the first filter register. 
1504 */ 1505 1506 reg = sbmac_addr2reg(s->sbm_hwaddr); 1507 1508 port = s->sbm_base + R_MAC_ADDR_BASE; 1509 __raw_writeq(reg, port); 1510 port = s->sbm_base + R_MAC_ETHERNET_ADDR; 1511 1512#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS 1513 /* 1514 * Pass1 SOCs do not receive packets addressed to the 1515 * destination address in the R_MAC_ETHERNET_ADDR register. 1516 * Set the value to zero. 1517 */ 1518 __raw_writeq(0, port); 1519#else 1520 __raw_writeq(reg, port); 1521#endif 1522 1523 /* 1524 * Set the receive filter for no packets, and write values 1525 * to the various config registers 1526 */ 1527 1528 __raw_writeq(0, s->sbm_rxfilter); 1529 __raw_writeq(0, s->sbm_imr); 1530 __raw_writeq(framecfg, s->sbm_framecfg); 1531 __raw_writeq(fifo, s->sbm_fifocfg); 1532 __raw_writeq(cfg, s->sbm_maccfg); 1533 1534 /* 1535 * Initialize DMA channels (rings should be ok now) 1536 */ 1537 1538 sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); 1539 sbdma_channel_start(&(s->sbm_txdma), DMA_TX); 1540 1541 /* 1542 * Configure the speed, duplex, and flow control 1543 */ 1544 1545 sbmac_set_speed(s,s->sbm_speed); 1546 sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); 1547 1548 /* 1549 * Fill the receive ring 1550 */ 1551 1552 sbdma_fillring(s, &(s->sbm_rxdma)); 1553 1554 /* 1555 * Turn on the rest of the bits in the enable register 1556 */ 1557 1558#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) 1559 __raw_writeq(M_MAC_RXDMA_EN0 | 1560 M_MAC_TXDMA_EN0, s->sbm_macenable); 1561#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) 1562 __raw_writeq(M_MAC_RXDMA_EN0 | 1563 M_MAC_TXDMA_EN0 | 1564 M_MAC_RX_ENABLE | 1565 M_MAC_TX_ENABLE, s->sbm_macenable); 1566#else 1567#error invalid SiByte MAC configuation 1568#endif 1569 1570#ifdef CONFIG_SBMAC_COALESCE 1571 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | 1572 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr); 1573#else 1574 __raw_writeq((M_MAC_INT_CHANNEL << 
S_MAC_TX_CH0) | 1575 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr); 1576#endif 1577 1578 /* 1579 * Enable receiving unicasts and broadcasts 1580 */ 1581 1582 __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter); 1583 1584 /* 1585 * we're running now. 1586 */ 1587 1588 s->sbm_state = sbmac_state_on; 1589 1590 /* 1591 * Program multicast addresses 1592 */ 1593 1594 sbmac_setmulti(s); 1595 1596 /* 1597 * If channel was in promiscuous mode before, turn that on 1598 */ 1599 1600 if (s->sbm_devflags & IFF_PROMISC) { 1601 sbmac_promiscuous_mode(s,1); 1602 } 1603 1604} 1605 1606 1607/********************************************************************** 1608 * SBMAC_CHANNEL_STOP(s) 1609 * 1610 * Stop packet processing on this MAC. 1611 * 1612 * Input parameters: 1613 * s - sbmac structure 1614 * 1615 * Return value: 1616 * nothing 1617 ********************************************************************* */ 1618 1619static void sbmac_channel_stop(struct sbmac_softc *s) 1620{ 1621 /* don't do this if already stopped */ 1622 1623 if (s->sbm_state == sbmac_state_off) 1624 return; 1625 1626 /* don't accept any packets, disable all interrupts */ 1627 1628 __raw_writeq(0, s->sbm_rxfilter); 1629 __raw_writeq(0, s->sbm_imr); 1630 1631 /* Turn off ticker */ 1632 1633 1634 /* turn off receiver and transmitter */ 1635 1636 __raw_writeq(0, s->sbm_macenable); 1637 1638 /* We're stopped now. 
*/ 1639 1640 s->sbm_state = sbmac_state_off; 1641 1642 /* 1643 * Stop DMA channels (rings should be ok now) 1644 */ 1645 1646 sbdma_channel_stop(&(s->sbm_rxdma)); 1647 sbdma_channel_stop(&(s->sbm_txdma)); 1648 1649 /* Empty the receive and transmit rings */ 1650 1651 sbdma_emptyring(&(s->sbm_rxdma)); 1652 sbdma_emptyring(&(s->sbm_txdma)); 1653 1654} 1655 1656/********************************************************************** 1657 * SBMAC_SET_CHANNEL_STATE(state) 1658 * 1659 * Set the channel's state ON or OFF 1660 * 1661 * Input parameters: 1662 * state - new state 1663 * 1664 * Return value: 1665 * old state 1666 ********************************************************************* */ 1667static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, 1668 enum sbmac_state state) 1669{ 1670 enum sbmac_state oldstate = sc->sbm_state; 1671 1672 /* 1673 * If same as previous state, return 1674 */ 1675 1676 if (state == oldstate) { 1677 return oldstate; 1678 } 1679 1680 /* 1681 * If new state is ON, turn channel on 1682 */ 1683 1684 if (state == sbmac_state_on) { 1685 sbmac_channel_start(sc); 1686 } 1687 else { 1688 sbmac_channel_stop(sc); 1689 } 1690 1691 /* 1692 * Return previous state 1693 */ 1694 1695 return oldstate; 1696} 1697 1698 1699/********************************************************************** 1700 * SBMAC_PROMISCUOUS_MODE(sc,onoff) 1701 * 1702 * Turn on or off promiscuous mode 1703 * 1704 * Input parameters: 1705 * sc - softc 1706 * onoff - 1 to turn on, 0 to turn off 1707 * 1708 * Return value: 1709 * nothing 1710 ********************************************************************* */ 1711 1712static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) 1713{ 1714 uint64_t reg; 1715 1716 if (sc->sbm_state != sbmac_state_on) 1717 return; 1718 1719 if (onoff) { 1720 reg = __raw_readq(sc->sbm_rxfilter); 1721 reg |= M_MAC_ALLPKT_EN; 1722 __raw_writeq(reg, sc->sbm_rxfilter); 1723 } 1724 else { 1725 reg = 
__raw_readq(sc->sbm_rxfilter); 1726 reg &= ~M_MAC_ALLPKT_EN; 1727 __raw_writeq(reg, sc->sbm_rxfilter); 1728 } 1729} 1730 1731/********************************************************************** 1732 * SBMAC_SETIPHDR_OFFSET(sc,onoff) 1733 * 1734 * Set the iphdr offset as 15 assuming ethernet encapsulation 1735 * 1736 * Input parameters: 1737 * sc - softc 1738 * 1739 * Return value: 1740 * nothing 1741 ********************************************************************* */ 1742 1743static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) 1744{ 1745 uint64_t reg; 1746 1747 /* Hard code the off set to 15 for now */ 1748 reg = __raw_readq(sc->sbm_rxfilter); 1749 reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); 1750 __raw_writeq(reg, sc->sbm_rxfilter); 1751 1752 /* BCM1250 pass1 didn't have hardware checksum. Everything 1753 later does. */ 1754 if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { 1755 sc->rx_hw_checksum = DISABLE; 1756 } else { 1757 sc->rx_hw_checksum = ENABLE; 1758 } 1759} 1760 1761 1762/********************************************************************** 1763 * SBMAC_ADDR2REG(ptr) 1764 * 1765 * Convert six bytes into the 64-bit register value that 1766 * we typically write into the SBMAC's address/mcast registers 1767 * 1768 * Input parameters: 1769 * ptr - pointer to 6 bytes 1770 * 1771 * Return value: 1772 * register value 1773 ********************************************************************* */ 1774 1775static uint64_t sbmac_addr2reg(unsigned char *ptr) 1776{ 1777 uint64_t reg = 0; 1778 1779 ptr += 6; 1780 1781 reg |= (uint64_t) *(--ptr); 1782 reg <<= 8; 1783 reg |= (uint64_t) *(--ptr); 1784 reg <<= 8; 1785 reg |= (uint64_t) *(--ptr); 1786 reg <<= 8; 1787 reg |= (uint64_t) *(--ptr); 1788 reg <<= 8; 1789 reg |= (uint64_t) *(--ptr); 1790 reg <<= 8; 1791 reg |= (uint64_t) *(--ptr); 1792 1793 return reg; 1794} 1795 1796 1797/********************************************************************** 1798 * SBMAC_SET_SPEED(s,speed) 1799 
 *
 *  Configure LAN speed for the specified MAC.
 *  Warning: must be called when MAC is off!
 *
 *  Input parameters:
 *	   s - sbmac structure
 *	   speed - speed to set MAC to (see enum sbmac_speed)
 *
 *  Return value:
 *	   1 if successful
 *	   0 indicates invalid parameters
 ********************************************************************* */

static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed)
{
	uint64_t cfg;
	uint64_t framecfg;

	/*
	 * Save new current values
	 */

	s->sbm_speed = speed;

	/* Hardware can only be reprogrammed while the MAC is off;
	   the saved value is applied on the next restart. */
	if (s->sbm_state == sbmac_state_on)
		return 0;	/* save for next restart */

	/*
	 * Read current register values
	 */

	cfg = __raw_readq(s->sbm_maccfg);
	framecfg = __raw_readq(s->sbm_framecfg);

	/*
	 * Mask out the stuff we want to change
	 */

	cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
	framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
		      M_MAC_SLOT_SIZE);

	/*
	 * Now add in the new bits (inter-frame gap, slot size and
	 * speed select per rate; burst mode only for gigabit)
	 */

	switch (speed) {
	case sbmac_speed_10:
		framecfg |= V_MAC_IFG_RX_10 |
			V_MAC_IFG_TX_10 |
			K_MAC_IFG_THRSH_10 |
			V_MAC_SLOT_SIZE_10;
		cfg |= V_MAC_SPEED_SEL_10MBPS;
		break;

	case sbmac_speed_100:
		framecfg |= V_MAC_IFG_RX_100 |
			V_MAC_IFG_TX_100 |
			V_MAC_IFG_THRSH_100 |
			V_MAC_SLOT_SIZE_100;
		cfg |= V_MAC_SPEED_SEL_100MBPS ;
		break;

	case sbmac_speed_1000:
		framecfg |= V_MAC_IFG_RX_1000 |
			V_MAC_IFG_TX_1000 |
			V_MAC_IFG_THRSH_1000 |
			V_MAC_SLOT_SIZE_1000;
		cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
		break;

	default:
		return 0;
	}

	/*
	 * Send the bits back to the hardware
	 */

	__raw_writeq(framecfg, s->sbm_framecfg);
	__raw_writeq(cfg, s->sbm_maccfg);

	return 1;
}

/**********************************************************************
 *  SBMAC_SET_DUPLEX(s,duplex,fc)
 *
 *  Set Ethernet duplex and flow control options for this MAC
 *  Warning: must be called when MAC is off!
 *
 *  Input parameters:
 *	   s - sbmac structure
 *	   duplex - duplex setting (see enum sbmac_duplex)
 *	   fc - flow control setting (see enum sbmac_fc)
 *
 *  Return value:
 *	   1 if ok
 *	   0 if an invalid parameter combination was specified
 ********************************************************************* */

static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
			    enum sbmac_fc fc)
{
	uint64_t cfg;

	/*
	 * Save new current values
	 */

	s->sbm_duplex = duplex;
	s->sbm_fc = fc;

	/* Hardware can only be reprogrammed while the MAC is off;
	   the saved values are applied on the next restart. */
	if (s->sbm_state == sbmac_state_on)
		return 0;	/* save for next restart */

	/*
	 * Read current register values
	 */

	cfg = __raw_readq(s->sbm_maccfg);

	/*
	 * Mask off the stuff we're about to change
	 */

	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);


	switch (duplex) {
	case sbmac_duplex_half:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
			break;

		case sbmac_fc_collision:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
			break;

		case sbmac_fc_carrier:
			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
			break;

		case sbmac_fc_frame:		/* not valid in half duplex */
		default:			/* invalid selection */
			return 0;
		}
		break;

	case sbmac_duplex_full:
		switch (fc) {
		case sbmac_fc_disabled:
			cfg |= V_MAC_FC_CMD_DISABLED;
			break;

		case sbmac_fc_frame:
			cfg |= V_MAC_FC_CMD_ENABLED;
			break;

		case sbmac_fc_collision:	/* not valid in full duplex */
		case sbmac_fc_carrier:		/* not valid in full duplex */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Send the bits back to the hardware
	 */

	__raw_writeq(cfg, s->sbm_maccfg);

	return 1;
}




/**********************************************************************
 *  SBMAC_INTR()
 *
 *  Interrupt handler for MAC interrupts
 *
 *  Input parameters:
 *	   MAC structure
 *
 *  Return value:
 *	   IRQ_HANDLED if this device raised the interrupt, else IRQ_NONE
 ********************************************************************* */
static irqreturn_t sbmac_intr(int irq,void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct sbmac_softc *sc = netdev_priv(dev);
	uint64_t isr;
	int handled = 0;

	/*
	 * Read the ISR (this clears the bits in the real
	 * register, except for counter addr)
	 */

	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;

	/* Shared IRQ line: not ours if no status bits are set. */
	if (isr == 0)
		return IRQ_RETVAL(0);
	handled = 1;

	/*
	 * Transmits on channel 0
	 */

	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);

	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
		if (napi_schedule_prep(&sc->napi)) {
			/* Mask MAC interrupts while NAPI polls. */
			__raw_writeq(0, sc->sbm_imr);
			__napi_schedule(&sc->napi);
			/* Depend on the exit from poll to reenable intr */
		}
		else {
			/* may leave some packets behind */
			sbdma_rx_process(sc,&(sc->sbm_rxdma),
					 SBMAC_MAX_RXDESCR * 2, 0);
		}
	}
	return IRQ_RETVAL(handled);
}

/**********************************************************************
 *  SBMAC_START_TX(skb,dev)
 *
 *  Start output on the specified interface.  Basically, we
 *  queue as many buffers as we can until the ring fills up, or
 *  we run off the end of the queue, whichever comes first.
2038 * 2039 * Input parameters: 2040 * 2041 * 2042 * Return value: 2043 * nothing 2044 ********************************************************************* */ 2045static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) 2046{ 2047 struct sbmac_softc *sc = netdev_priv(dev); 2048 unsigned long flags; 2049 2050 /* lock eth irq */ 2051 spin_lock_irqsave(&sc->sbm_lock, flags); 2052 2053 /* 2054 * Put the buffer on the transmit ring. If we 2055 * don't have room, stop the queue. 2056 */ 2057 2058 if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { 2059 netif_stop_queue(dev); 2060 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2061 2062 return NETDEV_TX_BUSY; 2063 } 2064 2065 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2066 2067 return NETDEV_TX_OK; 2068} 2069 2070/********************************************************************** 2071 * SBMAC_SETMULTI(sc) 2072 * 2073 * Reprogram the multicast table into the hardware, given 2074 * the list of multicasts associated with the interface 2075 * structure. 2076 * 2077 * Input parameters: 2078 * sc - softc 2079 * 2080 * Return value: 2081 * nothing 2082 ********************************************************************* */ 2083 2084static void sbmac_setmulti(struct sbmac_softc *sc) 2085{ 2086 uint64_t reg; 2087 void __iomem *port; 2088 int idx; 2089 struct netdev_hw_addr *ha; 2090 struct net_device *dev = sc->sbm_dev; 2091 2092 /* 2093 * Clear out entire multicast table. We do this by nuking 2094 * the entire hash table and all the direct matches except 2095 * the first one, which is used for our station address 2096 */ 2097 2098 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) { 2099 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)); 2100 __raw_writeq(0, port); 2101 } 2102 2103 for (idx = 0; idx < MAC_HASH_COUNT; idx++) { 2104 port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t)); 2105 __raw_writeq(0, port); 2106 } 2107 2108 /* 2109 * Clear the filter to say we don't want any multicasts. 
2110 */ 2111 2112 reg = __raw_readq(sc->sbm_rxfilter); 2113 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN); 2114 __raw_writeq(reg, sc->sbm_rxfilter); 2115 2116 if (dev->flags & IFF_ALLMULTI) { 2117 /* 2118 * Enable ALL multicasts. Do this by inverting the 2119 * multicast enable bit. 2120 */ 2121 reg = __raw_readq(sc->sbm_rxfilter); 2122 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN); 2123 __raw_writeq(reg, sc->sbm_rxfilter); 2124 return; 2125 } 2126 2127 2128 /* 2129 * Progam new multicast entries. For now, only use the 2130 * perfect filter. In the future we'll need to use the 2131 * hash filter if the perfect filter overflows 2132 */ 2133 2134 2135 idx = 1; /* skip station address */ 2136 netdev_for_each_mc_addr(ha, dev) { 2137 if (idx == MAC_ADDR_COUNT) 2138 break; 2139 reg = sbmac_addr2reg(ha->addr); 2140 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); 2141 __raw_writeq(reg, port); 2142 idx++; 2143 } 2144 2145 /* 2146 * Enable the "accept multicast bits" if we programmed at least one 2147 * multicast. 
2148 */ 2149 2150 if (idx > 1) { 2151 reg = __raw_readq(sc->sbm_rxfilter); 2152 reg |= M_MAC_MCAST_EN; 2153 __raw_writeq(reg, sc->sbm_rxfilter); 2154 } 2155} 2156 2157static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) 2158{ 2159 if (new_mtu > ENET_PACKET_SIZE) 2160 return -EINVAL; 2161 _dev->mtu = new_mtu; 2162 pr_info("changing the mtu to %d\n", new_mtu); 2163 return 0; 2164} 2165 2166static const struct net_device_ops sbmac_netdev_ops = { 2167 .ndo_open = sbmac_open, 2168 .ndo_stop = sbmac_close, 2169 .ndo_start_xmit = sbmac_start_tx, 2170 .ndo_set_multicast_list = sbmac_set_rx_mode, 2171 .ndo_tx_timeout = sbmac_tx_timeout, 2172 .ndo_do_ioctl = sbmac_mii_ioctl, 2173 .ndo_change_mtu = sb1250_change_mtu, 2174 .ndo_validate_addr = eth_validate_addr, 2175 .ndo_set_mac_address = eth_mac_addr, 2176#ifdef CONFIG_NET_POLL_CONTROLLER 2177 .ndo_poll_controller = sbmac_netpoll, 2178#endif 2179}; 2180 2181/********************************************************************** 2182 * SBMAC_INIT(dev) 2183 * 2184 * Attach routine - init hardware and hook ourselves into linux 2185 * 2186 * Input parameters: 2187 * dev - net_device structure 2188 * 2189 * Return value: 2190 * status 2191 ********************************************************************* */ 2192 2193static int sbmac_init(struct platform_device *pldev, long long base) 2194{ 2195 struct net_device *dev = dev_get_drvdata(&pldev->dev); 2196 int idx = pldev->id; 2197 struct sbmac_softc *sc = netdev_priv(dev); 2198 unsigned char *eaddr; 2199 uint64_t ea_reg; 2200 int i; 2201 int err; 2202 2203 sc->sbm_dev = dev; 2204 sc->sbe_idx = idx; 2205 2206 eaddr = sc->sbm_hwaddr; 2207 2208 /* 2209 * Read the ethernet address. The firmware left this programmed 2210 * for us in the ethernet address register for each mac. 
2211 */ 2212 2213 ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR); 2214 __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR); 2215 for (i = 0; i < 6; i++) { 2216 eaddr[i] = (uint8_t) (ea_reg & 0xFF); 2217 ea_reg >>= 8; 2218 } 2219 2220 for (i = 0; i < 6; i++) { 2221 dev->dev_addr[i] = eaddr[i]; 2222 } 2223 2224 /* 2225 * Initialize context (get pointers to registers and stuff), then 2226 * allocate the memory for the descriptor tables. 2227 */ 2228 2229 sbmac_initctx(sc); 2230 2231 /* 2232 * Set up Linux device callins 2233 */ 2234 2235 spin_lock_init(&(sc->sbm_lock)); 2236 2237 dev->netdev_ops = &sbmac_netdev_ops; 2238 dev->watchdog_timeo = TX_TIMEOUT; 2239 2240 netif_napi_add(dev, &sc->napi, sbmac_poll, 16); 2241 2242 dev->irq = UNIT_INT(idx); 2243 2244 /* This is needed for PASS2 for Rx H/W checksum feature */ 2245 sbmac_set_iphdr_offset(sc); 2246 2247 sc->mii_bus = mdiobus_alloc(); 2248 if (sc->mii_bus == NULL) { 2249 err = -ENOMEM; 2250 goto uninit_ctx; 2251 } 2252 2253 sc->mii_bus->name = sbmac_mdio_string; 2254 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx); 2255 sc->mii_bus->priv = sc; 2256 sc->mii_bus->read = sbmac_mii_read; 2257 sc->mii_bus->write = sbmac_mii_write; 2258 sc->mii_bus->irq = sc->phy_irq; 2259 for (i = 0; i < PHY_MAX_ADDR; ++i) 2260 sc->mii_bus->irq[i] = SBMAC_PHY_INT; 2261 2262 sc->mii_bus->parent = &pldev->dev; 2263 /* 2264 * Probe PHY address 2265 */ 2266 err = mdiobus_register(sc->mii_bus); 2267 if (err) { 2268 printk(KERN_ERR "%s: unable to register MDIO bus\n", 2269 dev->name); 2270 goto free_mdio; 2271 } 2272 dev_set_drvdata(&pldev->dev, sc->mii_bus); 2273 2274 err = register_netdev(dev); 2275 if (err) { 2276 printk(KERN_ERR "%s.%d: unable to register netdev\n", 2277 sbmac_string, idx); 2278 goto unreg_mdio; 2279 } 2280 2281 pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); 2282 2283 if (sc->rx_hw_checksum == ENABLE) 2284 pr_info("%s: enabling TCP rcv checksum\n", dev->name); 2285 2286 /* 2287 * 
Display Ethernet address (this is called during the config 2288 * process so we need to finish off the config message that 2289 * was being displayed) 2290 */ 2291 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", 2292 dev->name, base, eaddr); 2293 2294 return 0; 2295unreg_mdio: 2296 mdiobus_unregister(sc->mii_bus); 2297 dev_set_drvdata(&pldev->dev, NULL); 2298free_mdio: 2299 mdiobus_free(sc->mii_bus); 2300uninit_ctx: 2301 sbmac_uninitctx(sc); 2302 return err; 2303} 2304 2305 2306static int sbmac_open(struct net_device *dev) 2307{ 2308 struct sbmac_softc *sc = netdev_priv(dev); 2309 int err; 2310 2311 if (debug > 1) 2312 pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); 2313 2314 /* 2315 * map/route interrupt (clear status first, in case something 2316 * weird is pending; we haven't initialized the mac registers 2317 * yet) 2318 */ 2319 2320 __raw_readq(sc->sbm_isr); 2321 err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev); 2322 if (err) { 2323 printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, 2324 dev->irq); 2325 goto out_err; 2326 } 2327 2328 sc->sbm_speed = sbmac_speed_none; 2329 sc->sbm_duplex = sbmac_duplex_none; 2330 sc->sbm_fc = sbmac_fc_none; 2331 sc->sbm_pause = -1; 2332 sc->sbm_link = 0; 2333 2334 /* 2335 * Attach to the PHY 2336 */ 2337 err = sbmac_mii_probe(dev); 2338 if (err) 2339 goto out_unregister; 2340 2341 /* 2342 * Turn on the channel 2343 */ 2344 2345 sbmac_set_channel_state(sc,sbmac_state_on); 2346 2347 netif_start_queue(dev); 2348 2349 sbmac_set_rx_mode(dev); 2350 2351 phy_start(sc->phy_dev); 2352 2353 napi_enable(&sc->napi); 2354 2355 return 0; 2356 2357out_unregister: 2358 free_irq(dev->irq, dev); 2359out_err: 2360 return err; 2361} 2362 2363static int sbmac_mii_probe(struct net_device *dev) 2364{ 2365 struct sbmac_softc *sc = netdev_priv(dev); 2366 struct phy_device *phy_dev; 2367 int i; 2368 2369 for (i = 0; i < PHY_MAX_ADDR; i++) { 2370 phy_dev = sc->mii_bus->phy_map[i]; 2371 if (phy_dev) 2372 
break; 2373 } 2374 if (!phy_dev) { 2375 printk(KERN_ERR "%s: no PHY found\n", dev->name); 2376 return -ENXIO; 2377 } 2378 2379 phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, 2380 PHY_INTERFACE_MODE_GMII); 2381 if (IS_ERR(phy_dev)) { 2382 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); 2383 return PTR_ERR(phy_dev); 2384 } 2385 2386 /* Remove any features not supported by the controller */ 2387 phy_dev->supported &= SUPPORTED_10baseT_Half | 2388 SUPPORTED_10baseT_Full | 2389 SUPPORTED_100baseT_Half | 2390 SUPPORTED_100baseT_Full | 2391 SUPPORTED_1000baseT_Half | 2392 SUPPORTED_1000baseT_Full | 2393 SUPPORTED_Autoneg | 2394 SUPPORTED_MII | 2395 SUPPORTED_Pause | 2396 SUPPORTED_Asym_Pause; 2397 phy_dev->advertising = phy_dev->supported; 2398 2399 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 2400 dev->name, phy_dev->drv->name, 2401 dev_name(&phy_dev->dev), phy_dev->irq); 2402 2403 sc->phy_dev = phy_dev; 2404 2405 return 0; 2406} 2407 2408 2409static void sbmac_mii_poll(struct net_device *dev) 2410{ 2411 struct sbmac_softc *sc = netdev_priv(dev); 2412 struct phy_device *phy_dev = sc->phy_dev; 2413 unsigned long flags; 2414 enum sbmac_fc fc; 2415 int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; 2416 2417 link_chg = (sc->sbm_link != phy_dev->link); 2418 speed_chg = (sc->sbm_speed != phy_dev->speed); 2419 duplex_chg = (sc->sbm_duplex != phy_dev->duplex); 2420 pause_chg = (sc->sbm_pause != phy_dev->pause); 2421 2422 if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) 2423 return; /* Hmmm... 
*/ 2424 2425 if (!phy_dev->link) { 2426 if (link_chg) { 2427 sc->sbm_link = phy_dev->link; 2428 sc->sbm_speed = sbmac_speed_none; 2429 sc->sbm_duplex = sbmac_duplex_none; 2430 sc->sbm_fc = sbmac_fc_disabled; 2431 sc->sbm_pause = -1; 2432 pr_info("%s: link unavailable\n", dev->name); 2433 } 2434 return; 2435 } 2436 2437 if (phy_dev->duplex == DUPLEX_FULL) { 2438 if (phy_dev->pause) 2439 fc = sbmac_fc_frame; 2440 else 2441 fc = sbmac_fc_disabled; 2442 } else 2443 fc = sbmac_fc_collision; 2444 fc_chg = (sc->sbm_fc != fc); 2445 2446 pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, 2447 phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H'); 2448 2449 spin_lock_irqsave(&sc->sbm_lock, flags); 2450 2451 sc->sbm_speed = phy_dev->speed; 2452 sc->sbm_duplex = phy_dev->duplex; 2453 sc->sbm_fc = fc; 2454 sc->sbm_pause = phy_dev->pause; 2455 sc->sbm_link = phy_dev->link; 2456 2457 if ((speed_chg || duplex_chg || fc_chg) && 2458 sc->sbm_state != sbmac_state_off) { 2459 /* 2460 * something changed, restart the channel 2461 */ 2462 if (debug > 1) 2463 pr_debug("%s: restarting channel " 2464 "because PHY state changed\n", dev->name); 2465 sbmac_channel_stop(sc); 2466 sbmac_channel_start(sc); 2467 } 2468 2469 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2470} 2471 2472 2473static void sbmac_tx_timeout (struct net_device *dev) 2474{ 2475 struct sbmac_softc *sc = netdev_priv(dev); 2476 unsigned long flags; 2477 2478 spin_lock_irqsave(&sc->sbm_lock, flags); 2479 2480 2481 dev->trans_start = jiffies; /* prevent tx timeout */ 2482 dev->stats.tx_errors++; 2483 2484 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2485 2486 printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); 2487} 2488 2489 2490 2491 2492static void sbmac_set_rx_mode(struct net_device *dev) 2493{ 2494 unsigned long flags; 2495 struct sbmac_softc *sc = netdev_priv(dev); 2496 2497 spin_lock_irqsave(&sc->sbm_lock, flags); 2498 if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) { 2499 /* 2500 * Promiscuous 
changed. 2501 */ 2502 2503 if (dev->flags & IFF_PROMISC) { 2504 sbmac_promiscuous_mode(sc,1); 2505 } 2506 else { 2507 sbmac_promiscuous_mode(sc,0); 2508 } 2509 } 2510 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2511 2512 /* 2513 * Program the multicasts. Do this every time. 2514 */ 2515 2516 sbmac_setmulti(sc); 2517 2518} 2519 2520static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2521{ 2522 struct sbmac_softc *sc = netdev_priv(dev); 2523 2524 if (!netif_running(dev) || !sc->phy_dev) 2525 return -EINVAL; 2526 2527 return phy_mii_ioctl(sc->phy_dev, rq, cmd); 2528} 2529 2530static int sbmac_close(struct net_device *dev) 2531{ 2532 struct sbmac_softc *sc = netdev_priv(dev); 2533 2534 napi_disable(&sc->napi); 2535 2536 phy_stop(sc->phy_dev); 2537 2538 sbmac_set_channel_state(sc, sbmac_state_off); 2539 2540 netif_stop_queue(dev); 2541 2542 if (debug > 1) 2543 pr_debug("%s: Shutting down ethercard\n", dev->name); 2544 2545 phy_disconnect(sc->phy_dev); 2546 sc->phy_dev = NULL; 2547 free_irq(dev->irq, dev); 2548 2549 sbdma_emptyring(&(sc->sbm_txdma)); 2550 sbdma_emptyring(&(sc->sbm_rxdma)); 2551 2552 return 0; 2553} 2554 2555static int sbmac_poll(struct napi_struct *napi, int budget) 2556{ 2557 struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); 2558 int work_done; 2559 2560 work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); 2561 sbdma_tx_process(sc, &(sc->sbm_txdma), 1); 2562 2563 if (work_done < budget) { 2564 napi_complete(napi); 2565 2566#ifdef CONFIG_SBMAC_COALESCE 2567 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | 2568 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), 2569 sc->sbm_imr); 2570#else 2571 __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | 2572 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); 2573#endif 2574 } 2575 2576 return work_done; 2577} 2578 2579 2580static int __devinit sbmac_probe(struct platform_device *pldev) 2581{ 2582 struct net_device 
*dev; 2583 struct sbmac_softc *sc; 2584 void __iomem *sbm_base; 2585 struct resource *res; 2586 u64 sbmac_orig_hwaddr; 2587 int err; 2588 2589 res = platform_get_resource(pldev, IORESOURCE_MEM, 0); 2590 BUG_ON(!res); 2591 sbm_base = ioremap_nocache(res->start, res->end - res->start + 1); 2592 if (!sbm_base) { 2593 printk(KERN_ERR "%s: unable to map device registers\n", 2594 dev_name(&pldev->dev)); 2595 err = -ENOMEM; 2596 goto out_out; 2597 } 2598 2599 /* 2600 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero 2601 * value for us by the firmware if we're going to use this MAC. 2602 * If we find a zero, skip this MAC. 2603 */ 2604 sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); 2605 pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), 2606 sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); 2607 if (sbmac_orig_hwaddr == 0) { 2608 err = 0; 2609 goto out_unmap; 2610 } 2611 2612 /* 2613 * Okay, cool. Initialize this MAC. 2614 */ 2615 dev = alloc_etherdev(sizeof(struct sbmac_softc)); 2616 if (!dev) { 2617 printk(KERN_ERR "%s: unable to allocate etherdev\n", 2618 dev_name(&pldev->dev)); 2619 err = -ENOMEM; 2620 goto out_unmap; 2621 } 2622 2623 dev_set_drvdata(&pldev->dev, dev); 2624 SET_NETDEV_DEV(dev, &pldev->dev); 2625 2626 sc = netdev_priv(dev); 2627 sc->sbm_base = sbm_base; 2628 2629 err = sbmac_init(pldev, res->start); 2630 if (err) 2631 goto out_kfree; 2632 2633 return 0; 2634 2635out_kfree: 2636 free_netdev(dev); 2637 __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); 2638 2639out_unmap: 2640 iounmap(sbm_base); 2641 2642out_out: 2643 return err; 2644} 2645 2646static int __exit sbmac_remove(struct platform_device *pldev) 2647{ 2648 struct net_device *dev = dev_get_drvdata(&pldev->dev); 2649 struct sbmac_softc *sc = netdev_priv(dev); 2650 2651 unregister_netdev(dev); 2652 sbmac_uninitctx(sc); 2653 mdiobus_unregister(sc->mii_bus); 2654 mdiobus_free(sc->mii_bus); 2655 iounmap(sc->sbm_base); 2656 
free_netdev(dev); 2657 2658 return 0; 2659} 2660 2661static struct platform_driver sbmac_driver = { 2662 .probe = sbmac_probe, 2663 .remove = __exit_p(sbmac_remove), 2664 .driver = { 2665 .name = sbmac_string, 2666 .owner = THIS_MODULE, 2667 }, 2668}; 2669 2670static int __init sbmac_init_module(void) 2671{ 2672 return platform_driver_register(&sbmac_driver); 2673} 2674 2675static void __exit sbmac_cleanup_module(void) 2676{ 2677 platform_driver_unregister(&sbmac_driver); 2678} 2679 2680module_init(sbmac_init_module); 2681module_exit(sbmac_cleanup_module); 2682