1/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. 2 * Once again I am out to prove that every ethernet 3 * controller out there can be most efficiently programmed 4 * if you make it look like a LANCE. 5 * 6 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net) 7 */ 8 9#include <linux/module.h> 10#include <linux/kernel.h> 11#include <linux/types.h> 12#include <linux/errno.h> 13#include <linux/fcntl.h> 14#include <linux/interrupt.h> 15#include <linux/ioport.h> 16#include <linux/in.h> 17#include <linux/slab.h> 18#include <linux/string.h> 19#include <linux/delay.h> 20#include <linux/init.h> 21#include <linux/crc32.h> 22#include <linux/netdevice.h> 23#include <linux/etherdevice.h> 24#include <linux/skbuff.h> 25#include <linux/ethtool.h> 26#include <linux/bitops.h> 27#include <linux/dma-mapping.h> 28#include <linux/of.h> 29#include <linux/of_device.h> 30 31#include <asm/system.h> 32#include <asm/io.h> 33#include <asm/dma.h> 34#include <asm/byteorder.h> 35#include <asm/idprom.h> 36#include <asm/openprom.h> 37#include <asm/oplib.h> 38#include <asm/auxio.h> 39#include <asm/pgtable.h> 40#include <asm/irq.h> 41 42#include "sunqe.h" 43 44#define DRV_NAME "sunqe" 45#define DRV_VERSION "4.1" 46#define DRV_RELDATE "August 27, 2008" 47#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" 48 49static char version[] = 50 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 51 52MODULE_VERSION(DRV_VERSION); 53MODULE_AUTHOR(DRV_AUTHOR); 54MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver"); 55MODULE_LICENSE("GPL"); 56 57static struct sunqec *root_qec_dev; 58 59static void qe_set_multicast(struct net_device *dev); 60 61#define QEC_RESET_TRIES 200 62 63static inline int qec_global_reset(void __iomem *gregs) 64{ 65 int tries = QEC_RESET_TRIES; 66 67 sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); 68 while (--tries) { 69 u32 tmp = sbus_readl(gregs + GLOB_CTRL); 70 if (tmp & GLOB_CTRL_RESET) { 71 udelay(20); 72 continue; 73 } 74 break; 75 } 76 if (tries) 77 return 0; 78 printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n"); 79 return -1; 80} 81 82#define MACE_RESET_RETRIES 200 83#define QE_RESET_RETRIES 200 84 85static inline int qe_stop(struct sunqe *qep) 86{ 87 void __iomem *cregs = qep->qcregs; 88 void __iomem *mregs = qep->mregs; 89 int tries; 90 91 /* Reset the MACE, then the QEC channel. */ 92 sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); 93 tries = MACE_RESET_RETRIES; 94 while (--tries) { 95 u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); 96 if (tmp & MREGS_BCONFIG_RESET) { 97 udelay(20); 98 continue; 99 } 100 break; 101 } 102 if (!tries) { 103 printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); 104 return -1; 105 } 106 107 sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); 108 tries = QE_RESET_RETRIES; 109 while (--tries) { 110 u32 tmp = sbus_readl(cregs + CREG_CTRL); 111 if (tmp & CREG_CTRL_RESET) { 112 udelay(20); 113 continue; 114 } 115 break; 116 } 117 if (!tries) { 118 printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); 119 return -1; 120 } 121 return 0; 122} 123 124static void qe_init_rings(struct sunqe *qep) 125{ 126 struct qe_init_block *qb = qep->qe_block; 127 struct sunqe_buffers *qbufs = qep->buffers; 128 __u32 qbufs_dvma = qep->buffers_dvma; 129 int i; 130 131 qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; 132 memset(qb, 0, sizeof(struct qe_init_block)); 133 memset(qbufs, 0, sizeof(struct sunqe_buffers)); 134 for (i = 0; i < RX_RING_SIZE; i++) { 135 qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i); 136 qb->qe_rxd[i].rx_flags = 137 (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); 138 } 139} 140 141static int qe_init(struct sunqe *qep, int from_irq) 142{ 143 struct sunqec *qecp = qep->parent; 144 void __iomem *cregs = qep->qcregs; 145 void __iomem *mregs = qep->mregs; 146 void __iomem *gregs = qecp->gregs; 147 unsigned char *e = &qep->dev->dev_addr[0]; 148 u32 tmp; 149 int i; 150 151 /* Shut it up. */ 152 if (qe_stop(qep)) 153 return -EAGAIN; 154 155 /* Setup initial rx/tx init block pointers. */ 156 sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); 157 sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); 158 159 /* Enable/mask the various irq's. */ 160 sbus_writel(0, cregs + CREG_RIMASK); 161 sbus_writel(1, cregs + CREG_TIMASK); 162 163 sbus_writel(0, cregs + CREG_QMASK); 164 sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK); 165 166 /* Setup the FIFO pointers into QEC local memory. */ 167 tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE); 168 sbus_writel(tmp, cregs + CREG_RXRBUFPTR); 169 sbus_writel(tmp, cregs + CREG_RXWBUFPTR); 170 171 tmp = sbus_readl(cregs + CREG_RXRBUFPTR) + 172 sbus_readl(gregs + GLOB_RSIZE); 173 sbus_writel(tmp, cregs + CREG_TXRBUFPTR); 174 sbus_writel(tmp, cregs + CREG_TXWBUFPTR); 175 176 /* Clear the channel collision counter. */ 177 sbus_writel(0, cregs + CREG_CCNT); 178 179 /* For 10baseT, inter frame space nor throttle seems to be necessary. */ 180 sbus_writel(0, cregs + CREG_PIPG); 181 182 /* Now dork with the AMD MACE. */ 183 sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG); 184 sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL); 185 sbus_writeb(0, mregs + MREGS_RXFCNTL); 186 187 /* The QEC dma's the rx'd packets from local memory out to main memory, 188 * and therefore it interrupts when the packet reception is "complete". 189 * So don't listen for the MACE talking about it. 190 */ 191 sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK); 192 sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG); 193 sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 | 194 MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU), 195 mregs + MREGS_FCONFIG); 196 197 /* Only usable interface on QuadEther is twisted pair. */ 198 sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG); 199 200 /* Tell MACE we are changing the ether address. */ 201 sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET, 202 mregs + MREGS_IACONFIG); 203 while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) 204 barrier(); 205 sbus_writeb(e[0], mregs + MREGS_ETHADDR); 206 sbus_writeb(e[1], mregs + MREGS_ETHADDR); 207 sbus_writeb(e[2], mregs + MREGS_ETHADDR); 208 sbus_writeb(e[3], mregs + MREGS_ETHADDR); 209 sbus_writeb(e[4], mregs + MREGS_ETHADDR); 210 sbus_writeb(e[5], mregs + MREGS_ETHADDR); 211 212 /* Clear out the address filter. */ 213 sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, 214 mregs + MREGS_IACONFIG); 215 while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) 216 barrier(); 217 for (i = 0; i < 8; i++) 218 sbus_writeb(0, mregs + MREGS_FILTER); 219 220 /* Address changes are now complete. */ 221 sbus_writeb(0, mregs + MREGS_IACONFIG); 222 223 qe_init_rings(qep); 224 225 /* Wait a little bit for the link to come up... */ 226 mdelay(5); 227 if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) { 228 int tries = 50; 229 230 while (--tries) { 231 u8 tmp; 232 233 mdelay(5); 234 barrier(); 235 tmp = sbus_readb(mregs + MREGS_PHYCONFIG); 236 if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0) 237 break; 238 } 239 if (tries == 0) 240 printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name); 241 } 242 243 /* Missed packet counter is cleared on a read. */ 244 sbus_readb(mregs + MREGS_MPCNT); 245 246 /* Reload multicast information, this will enable the receiver 247 * and transmitter. 248 */ 249 qe_set_multicast(qep->dev); 250 251 /* QEC should now start to show interrupts. */ 252 return 0; 253} 254 255/* Grrr, certain error conditions completely lock up the AMD MACE, 256 * so when we get these we _must_ reset the chip. 257 */ 258static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) 259{ 260 struct net_device *dev = qep->dev; 261 int mace_hwbug_workaround = 0; 262 263 if (qe_status & CREG_STAT_EDEFER) { 264 printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); 265 dev->stats.tx_errors++; 266 } 267 268 if (qe_status & CREG_STAT_CLOSS) { 269 printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); 270 dev->stats.tx_errors++; 271 dev->stats.tx_carrier_errors++; 272 } 273 274 if (qe_status & CREG_STAT_ERETRIES) { 275 printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); 276 dev->stats.tx_errors++; 277 mace_hwbug_workaround = 1; 278 } 279 280 if (qe_status & CREG_STAT_LCOLL) { 281 printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); 282 dev->stats.tx_errors++; 283 dev->stats.collisions++; 284 mace_hwbug_workaround = 1; 285 } 286 287 if (qe_status & CREG_STAT_FUFLOW) { 288 printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); 289 dev->stats.tx_errors++; 290 mace_hwbug_workaround = 1; 291 } 292 293 if (qe_status & CREG_STAT_JERROR) { 294 printk(KERN_ERR "%s: Jabber error.\n", dev->name); 295 } 296 297 if (qe_status & CREG_STAT_BERROR) { 298 printk(KERN_ERR "%s: Babble error.\n", dev->name); 299 } 300 301 if (qe_status & CREG_STAT_CCOFLOW) { 302 dev->stats.tx_errors += 256; 303 dev->stats.collisions += 256; 304 } 305 306 if (qe_status & CREG_STAT_TXDERROR) { 307 printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); 308 dev->stats.tx_errors++; 309 dev->stats.tx_aborted_errors++; 310 mace_hwbug_workaround = 1; 311 } 312 313 if (qe_status & CREG_STAT_TXLERR) { 314 printk(KERN_ERR "%s: Transmit late error.\n", dev->name); 315 dev->stats.tx_errors++; 316 mace_hwbug_workaround = 1; 317 } 318 319 if (qe_status & CREG_STAT_TXPERR) { 320 printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); 321 dev->stats.tx_errors++; 322 dev->stats.tx_aborted_errors++; 323 mace_hwbug_workaround = 1; 324 } 325 326 if (qe_status & CREG_STAT_TXSERR) { 327 printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); 328 dev->stats.tx_errors++; 329 dev->stats.tx_aborted_errors++; 330 mace_hwbug_workaround = 1; 331 } 332 333 if (qe_status & CREG_STAT_RCCOFLOW) { 334 dev->stats.rx_errors += 256; 335 dev->stats.collisions += 256; 336 } 337 338 if (qe_status & CREG_STAT_RUOFLOW) { 339 dev->stats.rx_errors += 256; 340 dev->stats.rx_over_errors += 256; 341 } 342 343 if (qe_status & CREG_STAT_MCOFLOW) { 344 dev->stats.rx_errors += 256; 345 dev->stats.rx_missed_errors += 256; 346 } 347 348 if (qe_status & CREG_STAT_RXFOFLOW) { 349 printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); 350 dev->stats.rx_errors++; 351 dev->stats.rx_over_errors++; 352 } 353 354 if (qe_status & CREG_STAT_RLCOLL) { 355 printk(KERN_ERR "%s: Late receive collision.\n", dev->name); 356 dev->stats.rx_errors++; 357 dev->stats.collisions++; 358 } 359 360 if (qe_status & CREG_STAT_FCOFLOW) { 361 dev->stats.rx_errors += 256; 362 dev->stats.rx_frame_errors += 256; 363 } 364 365 if (qe_status & CREG_STAT_CECOFLOW) { 366 dev->stats.rx_errors += 256; 367 dev->stats.rx_crc_errors += 256; 368 } 369 370 if (qe_status & CREG_STAT_RXDROP) { 371 printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); 372 dev->stats.rx_errors++; 373 dev->stats.rx_dropped++; 374 dev->stats.rx_missed_errors++; 375 } 376 377 if (qe_status & CREG_STAT_RXSMALL) { 378 printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); 379 dev->stats.rx_errors++; 380 dev->stats.rx_length_errors++; 381 } 382 383 if (qe_status & CREG_STAT_RXLERR) { 384 printk(KERN_ERR "%s: Receive late error.\n", dev->name); 385 dev->stats.rx_errors++; 386 mace_hwbug_workaround = 1; 387 } 388 389 if (qe_status & CREG_STAT_RXPERR) { 390 printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); 391 dev->stats.rx_errors++; 392 dev->stats.rx_missed_errors++; 393 mace_hwbug_workaround = 1; 394 } 395 396 if (qe_status & CREG_STAT_RXSERR) { 397 printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); 398 dev->stats.rx_errors++; 399 dev->stats.rx_missed_errors++; 400 mace_hwbug_workaround = 1; 401 } 402 403 if (mace_hwbug_workaround) 404 qe_init(qep, 1); 405 return mace_hwbug_workaround; 406} 407 408/* Per-QE receive interrupt service routine. Just like on the happy meal 409 * we receive directly into skb's with a small packet copy water mark. 410 */ 411static void qe_rx(struct sunqe *qep) 412{ 413 struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; 414 struct net_device *dev = qep->dev; 415 struct qe_rxd *this; 416 struct sunqe_buffers *qbufs = qep->buffers; 417 __u32 qbufs_dvma = qep->buffers_dvma; 418 int elem = qep->rx_new, drops = 0; 419 u32 flags; 420 421 this = &rxbase[elem]; 422 while (!((flags = this->rx_flags) & RXD_OWN)) { 423 struct sk_buff *skb; 424 unsigned char *this_qbuf = 425 &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0]; 426 __u32 this_qbuf_dvma = qbufs_dvma + 427 qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1))); 428 struct qe_rxd *end_rxd = 429 &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)]; 430 int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */ 431 432 /* Check for errors. */ 433 if (len < ETH_ZLEN) { 434 dev->stats.rx_errors++; 435 dev->stats.rx_length_errors++; 436 dev->stats.rx_dropped++; 437 } else { 438 skb = dev_alloc_skb(len + 2); 439 if (skb == NULL) { 440 drops++; 441 dev->stats.rx_dropped++; 442 } else { 443 skb_reserve(skb, 2); 444 skb_put(skb, len); 445 skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, 446 len); 447 skb->protocol = eth_type_trans(skb, qep->dev); 448 netif_rx(skb); 449 dev->stats.rx_packets++; 450 dev->stats.rx_bytes += len; 451 } 452 } 453 end_rxd->rx_addr = this_qbuf_dvma; 454 end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); 455 456 elem = NEXT_RX(elem); 457 this = &rxbase[elem]; 458 } 459 qep->rx_new = elem; 460 if (drops) 461 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name); 462} 463 464static void qe_tx_reclaim(struct sunqe *qep); 465 466/* Interrupts for all QE's get filtered out via the QEC master controller, 467 * so we just run through each qe and check to see who is signaling 468 * and thus needs to be serviced. 469 */ 470static irqreturn_t qec_interrupt(int irq, void *dev_id) 471{ 472 struct sunqec *qecp = dev_id; 473 u32 qec_status; 474 int channel = 0; 475 476 /* Latch the status now. */ 477 qec_status = sbus_readl(qecp->gregs + GLOB_STAT); 478 while (channel < 4) { 479 if (qec_status & 0xf) { 480 struct sunqe *qep = qecp->qes[channel]; 481 u32 qe_status; 482 483 qe_status = sbus_readl(qep->qcregs + CREG_STAT); 484 if (qe_status & CREG_STAT_ERRORS) { 485 if (qe_is_bolixed(qep, qe_status)) 486 goto next; 487 } 488 if (qe_status & CREG_STAT_RXIRQ) 489 qe_rx(qep); 490 if (netif_queue_stopped(qep->dev) && 491 (qe_status & CREG_STAT_TXIRQ)) { 492 spin_lock(&qep->lock); 493 qe_tx_reclaim(qep); 494 if (TX_BUFFS_AVAIL(qep) > 0) { 495 /* Wake net queue and return to 496 * lazy tx reclaim. 497 */ 498 netif_wake_queue(qep->dev); 499 sbus_writel(1, qep->qcregs + CREG_TIMASK); 500 } 501 spin_unlock(&qep->lock); 502 } 503 next: 504 ; 505 } 506 qec_status >>= 4; 507 channel++; 508 } 509 510 return IRQ_HANDLED; 511} 512 513static int qe_open(struct net_device *dev) 514{ 515 struct sunqe *qep = netdev_priv(dev); 516 517 qep->mconfig = (MREGS_MCONFIG_TXENAB | 518 MREGS_MCONFIG_RXENAB | 519 MREGS_MCONFIG_MBAENAB); 520 return qe_init(qep, 0); 521} 522 523static int qe_close(struct net_device *dev) 524{ 525 struct sunqe *qep = netdev_priv(dev); 526 527 qe_stop(qep); 528 return 0; 529} 530 531/* Reclaim TX'd frames from the ring. This must always run under 532 * the IRQ protected qep->lock. 533 */ 534static void qe_tx_reclaim(struct sunqe *qep) 535{ 536 struct qe_txd *txbase = &qep->qe_block->qe_txd[0]; 537 int elem = qep->tx_old; 538 539 while (elem != qep->tx_new) { 540 u32 flags = txbase[elem].tx_flags; 541 542 if (flags & TXD_OWN) 543 break; 544 elem = NEXT_TX(elem); 545 } 546 qep->tx_old = elem; 547} 548 549static void qe_tx_timeout(struct net_device *dev) 550{ 551 struct sunqe *qep = netdev_priv(dev); 552 int tx_full; 553 554 spin_lock_irq(&qep->lock); 555 556 /* Try to reclaim, if that frees up some tx 557 * entries, we're fine. 558 */ 559 qe_tx_reclaim(qep); 560 tx_full = TX_BUFFS_AVAIL(qep) <= 0; 561 562 spin_unlock_irq(&qep->lock); 563 564 if (! tx_full) 565 goto out; 566 567 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 568 qe_init(qep, 1); 569 570out: 571 netif_wake_queue(dev); 572} 573 574/* Get a packet queued to go onto the wire. */ 575static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) 576{ 577 struct sunqe *qep = netdev_priv(dev); 578 struct sunqe_buffers *qbufs = qep->buffers; 579 __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; 580 unsigned char *txbuf; 581 int len, entry; 582 583 spin_lock_irq(&qep->lock); 584 585 qe_tx_reclaim(qep); 586 587 len = skb->len; 588 entry = qep->tx_new; 589 590 txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0]; 591 txbuf_dvma = qbufs_dvma + 592 qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1))); 593 594 /* Avoid a race... */ 595 qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; 596 597 skb_copy_from_linear_data(skb, txbuf, len); 598 599 qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; 600 qep->qe_block->qe_txd[entry].tx_flags = 601 (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); 602 qep->tx_new = NEXT_TX(entry); 603 604 /* Get it going. */ 605 sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); 606 607 dev->stats.tx_packets++; 608 dev->stats.tx_bytes += len; 609 610 if (TX_BUFFS_AVAIL(qep) <= 0) { 611 /* Halt the net queue and enable tx interrupts. 612 * When the tx queue empties the tx irq handler 613 * will wake up the queue and return us back to 614 * the lazy tx reclaim scheme. 615 */ 616 netif_stop_queue(dev); 617 sbus_writel(0, qep->qcregs + CREG_TIMASK); 618 } 619 spin_unlock_irq(&qep->lock); 620 621 dev_kfree_skb(skb); 622 623 return NETDEV_TX_OK; 624} 625 626static void qe_set_multicast(struct net_device *dev) 627{ 628 struct sunqe *qep = netdev_priv(dev); 629 struct netdev_hw_addr *ha; 630 u8 new_mconfig = qep->mconfig; 631 char *addrs; 632 int i; 633 u32 crc; 634 635 /* Lock out others. */ 636 netif_stop_queue(dev); 637 638 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { 639 sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, 640 qep->mregs + MREGS_IACONFIG); 641 while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) 642 barrier(); 643 for (i = 0; i < 8; i++) 644 sbus_writeb(0xff, qep->mregs + MREGS_FILTER); 645 sbus_writeb(0, qep->mregs + MREGS_IACONFIG); 646 } else if (dev->flags & IFF_PROMISC) { 647 new_mconfig |= MREGS_MCONFIG_PROMISC; 648 } else { 649 u16 hash_table[4]; 650 u8 *hbytes = (unsigned char *) &hash_table[0]; 651 652 memset(hash_table, 0, sizeof(hash_table)); 653 netdev_for_each_mc_addr(ha, dev) { 654 addrs = ha->addr; 655 656 if (!(*addrs & 1)) 657 continue; 658 crc = ether_crc_le(6, addrs); 659 crc >>= 26; 660 hash_table[crc >> 4] |= 1 << (crc & 0xf); 661 } 662 /* Program the qe with the new filter value. */ 663 sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, 664 qep->mregs + MREGS_IACONFIG); 665 while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) 666 barrier(); 667 for (i = 0; i < 8; i++) { 668 u8 tmp = *hbytes++; 669 sbus_writeb(tmp, qep->mregs + MREGS_FILTER); 670 } 671 sbus_writeb(0, qep->mregs + MREGS_IACONFIG); 672 } 673 674 /* Any change of the logical address filter, the physical address, 675 * or enabling/disabling promiscuous mode causes the MACE to disable 676 * the receiver. So we must re-enable them here or else the MACE 677 * refuses to listen to anything on the network. Sheesh, took 678 * me a day or two to find this bug. 679 */ 680 qep->mconfig = new_mconfig; 681 sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG); 682 683 /* Let us get going again. */ 684 netif_wake_queue(dev); 685} 686 687/* Ethtool support... */ 688static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 689{ 690 const struct linux_prom_registers *regs; 691 struct sunqe *qep = netdev_priv(dev); 692 struct platform_device *op; 693 694 strcpy(info->driver, "sunqe"); 695 strcpy(info->version, "3.0"); 696 697 op = qep->op; 698 regs = of_get_property(op->dev.of_node, "reg", NULL); 699 if (regs) 700 sprintf(info->bus_info, "SBUS:%d", regs->which_io); 701 702} 703 704static u32 qe_get_link(struct net_device *dev) 705{ 706 struct sunqe *qep = netdev_priv(dev); 707 void __iomem *mregs = qep->mregs; 708 u8 phyconfig; 709 710 spin_lock_irq(&qep->lock); 711 phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); 712 spin_unlock_irq(&qep->lock); 713 714 return (phyconfig & MREGS_PHYCONFIG_LSTAT); 715} 716 717static const struct ethtool_ops qe_ethtool_ops = { 718 .get_drvinfo = qe_get_drvinfo, 719 .get_link = qe_get_link, 720}; 721 722/* This is only called once at boot time for each card probed. */ 723static void qec_init_once(struct sunqec *qecp, struct platform_device *op) 724{ 725 u8 bsizes = qecp->qec_bursts; 726 727 if (sbus_can_burst64() && (bsizes & DMA_BURST64)) { 728 sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL); 729 } else if (bsizes & DMA_BURST32) { 730 sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL); 731 } else { 732 sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL); 733 } 734 735 /* Packetsize only used in 100baseT BigMAC configurations, 736 * set it to zero just to be on the safe side. 737 */ 738 sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE); 739 740 /* Set the local memsize register, divided up to one piece per QE channel. */ 741 sbus_writel((resource_size(&op->resource[1]) >> 2), 742 qecp->gregs + GLOB_MSIZE); 743 744 /* Divide up the local QEC memory amongst the 4 QE receiver and 745 * transmitter FIFOs. Basically it is (total / 2 / num_channels). 746 */ 747 sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, 748 qecp->gregs + GLOB_TSIZE); 749 sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, 750 qecp->gregs + GLOB_RSIZE); 751} 752 753static u8 __devinit qec_get_burst(struct device_node *dp) 754{ 755 u8 bsizes, bsizes_more; 756 757 /* Find and set the burst sizes for the QEC, since it 758 * does the actual dma for all 4 channels. 759 */ 760 bsizes = of_getintprop_default(dp, "burst-sizes", 0xff); 761 bsizes &= 0xff; 762 bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff); 763 764 if (bsizes_more != 0xff) 765 bsizes &= bsizes_more; 766 if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || 767 (bsizes & DMA_BURST32)==0) 768 bsizes = (DMA_BURST32 - 1); 769 770 return bsizes; 771} 772 773static struct sunqec * __devinit get_qec(struct platform_device *child) 774{ 775 struct platform_device *op = to_platform_device(child->dev.parent); 776 struct sunqec *qecp; 777 778 qecp = dev_get_drvdata(&op->dev); 779 if (!qecp) { 780 qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); 781 if (qecp) { 782 u32 ctrl; 783 784 qecp->op = op; 785 qecp->gregs = of_ioremap(&op->resource[0], 0, 786 GLOB_REG_SIZE, 787 "QEC Global Registers"); 788 if (!qecp->gregs) 789 goto fail; 790 791 /* Make sure the QEC is in MACE mode. */ 792 ctrl = sbus_readl(qecp->gregs + GLOB_CTRL); 793 ctrl &= 0xf0000000; 794 if (ctrl != GLOB_CTRL_MMODE) { 795 printk(KERN_ERR "qec: Not in MACE mode!\n"); 796 goto fail; 797 } 798 799 if (qec_global_reset(qecp->gregs)) 800 goto fail; 801 802 qecp->qec_bursts = qec_get_burst(op->dev.of_node); 803 804 qec_init_once(qecp, op); 805 806 if (request_irq(op->archdata.irqs[0], qec_interrupt, 807 IRQF_SHARED, "qec", (void *) qecp)) { 808 printk(KERN_ERR "qec: Can't register irq.\n"); 809 goto fail; 810 } 811 812 dev_set_drvdata(&op->dev, qecp); 813 814 qecp->next_module = root_qec_dev; 815 root_qec_dev = qecp; 816 } 817 } 818 819 return qecp; 820 821fail: 822 if (qecp->gregs) 823 of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE); 824 kfree(qecp); 825 return NULL; 826} 827 828static const struct net_device_ops qec_ops = { 829 .ndo_open = qe_open, 830 .ndo_stop = qe_close, 831 .ndo_start_xmit = qe_start_xmit, 832 .ndo_set_multicast_list = qe_set_multicast, 833 .ndo_tx_timeout = qe_tx_timeout, 834 .ndo_change_mtu = eth_change_mtu, 835 .ndo_set_mac_address = eth_mac_addr, 836 .ndo_validate_addr = eth_validate_addr, 837}; 838 839static int __devinit qec_ether_init(struct platform_device *op) 840{ 841 static unsigned version_printed; 842 struct net_device *dev; 843 struct sunqec *qecp; 844 struct sunqe *qe; 845 int i, res; 846 847 if (version_printed++ == 0) 848 printk(KERN_INFO "%s", version); 849 850 dev = alloc_etherdev(sizeof(struct sunqe)); 851 if (!dev) 852 return -ENOMEM; 853 854 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 855 856 qe = netdev_priv(dev); 857 858 res = -ENODEV; 859 860 i = of_getintprop_default(op->dev.of_node, "channel#", -1); 861 if (i == -1) 862 goto fail; 863 qe->channel = i; 864 spin_lock_init(&qe->lock); 865 866 qecp = get_qec(op); 867 if (!qecp) 868 goto fail; 869 870 qecp->qes[qe->channel] = qe; 871 qe->dev = dev; 872 qe->parent = qecp; 873 qe->op = op; 874 875 res = -ENOMEM; 876 qe->qcregs = of_ioremap(&op->resource[0], 0, 877 CREG_REG_SIZE, "QEC Channel Registers"); 878 if (!qe->qcregs) { 879 printk(KERN_ERR "qe: Cannot map channel registers.\n"); 880 goto fail; 881 } 882 883 qe->mregs = of_ioremap(&op->resource[1], 0, 884 MREGS_REG_SIZE, "QE MACE Registers"); 885 if (!qe->mregs) { 886 printk(KERN_ERR "qe: Cannot map MACE registers.\n"); 887 goto fail; 888 } 889 890 qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE, 891 &qe->qblock_dvma, GFP_ATOMIC); 892 qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers), 893 &qe->buffers_dvma, GFP_ATOMIC); 894 if (qe->qe_block == NULL || qe->qblock_dvma == 0 || 895 qe->buffers == NULL || qe->buffers_dvma == 0) 896 goto fail; 897 898 /* Stop this QE. */ 899 qe_stop(qe); 900 901 SET_NETDEV_DEV(dev, &op->dev); 902 903 dev->watchdog_timeo = 5*HZ; 904 dev->irq = op->archdata.irqs[0]; 905 dev->dma = 0; 906 dev->ethtool_ops = &qe_ethtool_ops; 907 dev->netdev_ops = &qec_ops; 908 909 res = register_netdev(dev); 910 if (res) 911 goto fail; 912 913 dev_set_drvdata(&op->dev, qe); 914 915 printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel); 916 for (i = 0; i < 6; i++) 917 printk ("%2.2x%c", 918 dev->dev_addr[i], 919 i == 5 ? ' ': ':'); 920 printk("\n"); 921 922 923 return 0; 924 925fail: 926 if (qe->qcregs) 927 of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE); 928 if (qe->mregs) 929 of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE); 930 if (qe->qe_block) 931 dma_free_coherent(&op->dev, PAGE_SIZE, 932 qe->qe_block, qe->qblock_dvma); 933 if (qe->buffers) 934 dma_free_coherent(&op->dev, 935 sizeof(struct sunqe_buffers), 936 qe->buffers, 937 qe->buffers_dvma); 938 939 free_netdev(dev); 940 941 return res; 942} 943 944static int __devinit qec_sbus_probe(struct platform_device *op, const struct of_device_id *match) 945{ 946 return qec_ether_init(op); 947} 948 949static int __devexit qec_sbus_remove(struct platform_device *op) 950{ 951 struct sunqe *qp = dev_get_drvdata(&op->dev); 952 struct net_device *net_dev = qp->dev; 953 954 unregister_netdev(net_dev); 955 956 of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); 957 of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); 958 dma_free_coherent(&op->dev, PAGE_SIZE, 959 qp->qe_block, qp->qblock_dvma); 960 dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), 961 qp->buffers, qp->buffers_dvma); 962 963 free_netdev(net_dev); 964 965 dev_set_drvdata(&op->dev, NULL); 966 967 return 0; 968} 969 970static const struct of_device_id qec_sbus_match[] = { 971 { 972 .name = "qe", 973 }, 974 {}, 975}; 976 977MODULE_DEVICE_TABLE(of, qec_sbus_match); 978 979static struct of_platform_driver qec_sbus_driver = { 980 .driver = { 981 .name = "qec", 982 .owner = THIS_MODULE, 983 .of_match_table = qec_sbus_match, 984 }, 985 .probe = qec_sbus_probe, 986 .remove = __devexit_p(qec_sbus_remove), 987}; 988 989static int __init qec_init(void) 990{ 991 return of_register_platform_driver(&qec_sbus_driver); 992} 993 994static void __exit qec_exit(void) 995{ 996 of_unregister_platform_driver(&qec_sbus_driver); 997 998 while (root_qec_dev) { 999 struct sunqec *next = root_qec_dev->next_module; 1000 struct platform_device *op = root_qec_dev->op; 1001 1002 free_irq(op->archdata.irqs[0], (void *) root_qec_dev); 1003 of_iounmap(&op->resource[0], root_qec_dev->gregs, 1004 GLOB_REG_SIZE); 1005 kfree(root_qec_dev); 1006 1007 root_qec_dev = next; 1008 } 1009} 1010 1011module_init(qec_init); 1012module_exit(qec_exit); 1013