/* $Id: sunqe.c,v 1.1.1.1 2008/10/15 03:26:40 james26_jang Exp $
 * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
 */

static char version[] =
	"sunqe.c:v2.9 9/11/99 David S. Miller (davem@redhat.com)\n";

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "sunqe.h"

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(unsigned long gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	unsigned long cregs = qep->qcregs;
	unsigned long mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}
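/* Prime the RX/TX descriptor rings.  Everything is zeroed, the ring
 * indices are reset, and each RX descriptor is pointed at its slot in
 * the DVMA buffer area and handed to the chip via RXD_OWN.  TX
 * descriptors stay owned by the driver until qe_start_xmit() fills
 * them in.
 */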
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	unsigned long cregs = qep->qcregs;
	unsigned long mregs = qep->mregs;
	unsigned long gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame spacing nor throttling seems
	 * to be necessary.
	 */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (tries--) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		/* tries is -1 when the loop above runs to exhaustion. */
		if (tries < 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}
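/* CREG_STAT mixes RX/TX completion bits with a pile of error bits.
 * The various *COFLOW bits report that one of the chip's event
 * counters has overflowed, so the handler below credits 256 events
 * per overflow to the matching statistic.  Conditions known to wedge
 * the MACE set mace_hwbug_workaround, which forces a full qe_init()
 * reset of the channel.
 */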
/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}
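/* The RX descriptor ring (RX_RING_MAXSIZE entries) is larger than the
 * pool of DVMA buffers (RX_RING_SIZE), so buffers are reused modulo
 * RX_RING_SIZE: once a frame has been copied out below, its buffer is
 * parked RX_RING_SIZE slots ahead in the ring for the chip to refill.
 */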
/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->dev->last_rx = jiffies;
				qep->net_stats.rx_packets++;
				qep->net_stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", qep->dev->name);
}

static void qe_tx_reclaim(struct sunqe *qep);
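/* GLOB_STAT packs four status bits per channel, one nibble each with
 * channel 0 in the lowest nibble; the handler below tests 0xf and
 * shifts the latched status right by four as it walks all four
 * channels.
 */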
/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}
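/* The TX path is copy-based as well: each frame is memcpy'd into a
 * per-entry DVMA buffer.  The descriptor is first flagged TXD_UPDATE,
 * then filled in, and only then handed over with TXD_OWN so the chip
 * never acts on a half-built entry.  TX completion interrupts stay
 * masked ("lazy reclaim") until TX_BUFFS_AVAIL() says the ring is
 * full.
 */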
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	qep->net_stats.tx_packets++;
	qep->net_stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return 0;
}

static struct net_device_stats *qe_get_stats(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats;
}
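/* The MACE's logical address filter is a 64-bit hash.  For each
 * multicast address the little-endian CRC-32 is computed, its top six
 * bits select one of the 64 filter bits, and the resulting table is
 * loaded into the chip as eight bytes through MREGS_FILTER.  With
 * more than 64 multicast entries (or IFF_ALLMULTI) the filter is
 * simply set to all ones.
 */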
static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	u8 new_mconfig = qep->mconfig;
	char *addrs;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		for (i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * program the 2048 default just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

/* Four QE's per QEC card. */
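/* The QEC node is the SBUS parent; its four "qe" children each get
 * their own net_device and a private slice of the QEC's local memory,
 * and all four share the single QEC interrupt line.
 */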
static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev)
{
	static unsigned version_printed;
	struct net_device *qe_devs[4];
	struct sunqe *qeps[4];
	struct sbus_dev *qesdevs[4];
	struct sunqec *qecp = NULL;
	u8 bsizes, bsizes_more;
	int i, j, res = -ENOMEM;

	dev = init_etherdev(NULL, sizeof(struct sunqe));
	if (dev == NULL || dev->priv == NULL)
		return -ENOMEM;
	qe_devs[0] = dev;
	qeps[0] = (struct sunqe *) dev->priv;
	qeps[0]->channel = 0;
	spin_lock_init(&qeps[0]->lock);
	for (j = 0; j < 6; j++)
		qe_devs[0]->dev_addr[j] = idprom->id_ethaddr[j];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	qe_devs[1] = qe_devs[2] = qe_devs[3] = NULL;
	for (i = 1; i < 4; i++) {
		qe_devs[i] = init_etherdev(NULL, sizeof(struct sunqe));
		if (qe_devs[i] == NULL || qe_devs[i]->priv == NULL)
			goto qec_free_devs;
		qeps[i] = (struct sunqe *) qe_devs[i]->priv;
		for (j = 0; j < 6; j++)
			qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
		qeps[i]->channel = i;
	}
	qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
	if (qecp == NULL)
		goto qec_free_devs;
	qecp->qec_sdev = sdev;

	for (i = 0; i < 4; i++) {
		qecp->qes[i] = qeps[i];
		qeps[i]->dev = qe_devs[i];
		qeps[i]->parent = qecp;
	}

	/* Link in channel 0. */
	i = prom_getintdefault(sdev->child->prom_node, "channel#", -1);
	if (i == -1) {
		res = -ENODEV;
		goto qec_free_devs;
	}
	qesdevs[i] = sdev->child;

	/* Link in channel 1. */
	i = prom_getintdefault(sdev->child->next->prom_node, "channel#", -1);
	if (i == -1) {
		res = -ENODEV;
		goto qec_free_devs;
	}
	qesdevs[i] = sdev->child->next;

	/* Link in channel 2. */
	i = prom_getintdefault(sdev->child->next->next->prom_node, "channel#", -1);
	if (i == -1) {
		res = -ENODEV;
		goto qec_free_devs;
	}
	qesdevs[i] = sdev->child->next->next;

	/* Link in channel 3. */
	i = prom_getintdefault(sdev->child->next->next->next->prom_node, "channel#", -1);
	if (i == -1) {
		res = -ENODEV;
		goto qec_free_devs;
	}
	qesdevs[i] = sdev->child->next->next->next;

	for (i = 0; i < 4; i++)
		qeps[i]->qe_sdev = qesdevs[i];

	/* Now map in the registers, QEC globals first. */
	qecp->gregs = sbus_ioremap(&sdev->resource[0], 0,
				   GLOB_REG_SIZE, "QEC Global Registers");
	if (!qecp->gregs) {
		printk(KERN_ERR "QuadEther: Cannot map QEC global registers.\n");
		res = -ENODEV;
		goto qec_free_devs;
	}

	/* Make sure the QEC is in MACE mode. */
	if ((sbus_readl(qecp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_MMODE) {
		printk(KERN_ERR "QuadEther: AIEEE, QEC is not in MACE mode!\n");
		res = -ENODEV;
		goto qec_free_devs;
	}

	/* Reset the QEC. */
	if (qec_global_reset(qecp->gregs)) {
		res = -ENODEV;
		goto qec_free_devs;
	}
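	/* Burst sizes come from the OBP "burst-sizes" properties of both
	 * the QEC node and its parent SBUS bus; the usable set is their
	 * intersection.  If that leaves nothing sane, fall back to every
	 * burst size below 32 bytes (DMA_BURST32 - 1 is that bitmask).
	 */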
	/* Find and set the burst sizes for the QEC, since it does
	 * the actual dma for all 4 channels.
	 */
	bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	qecp->qec_bursts = bsizes;

	/* Perform one time QEC initialization, we never touch the QEC
	 * globals again after this.
	 */
	qec_init_once(qecp, sdev);

	for (i = 0; i < 4; i++) {
		/* Map in QEC per-channel control registers. */
		qeps[i]->qcregs = sbus_ioremap(&qesdevs[i]->resource[0], 0,
					       CREG_REG_SIZE, "QEC Channel Registers");
		if (!qeps[i]->qcregs) {
			printk(KERN_ERR "QuadEther: Cannot map QE %d's channel registers.\n", i);
			res = -ENODEV;
			goto qec_free_devs;
		}

		/* Map in per-channel AMD MACE registers. */
		qeps[i]->mregs = sbus_ioremap(&qesdevs[i]->resource[1], 0,
					      MREGS_REG_SIZE, "QE MACE Registers");
		if (!qeps[i]->mregs) {
			printk(KERN_ERR "QuadEther: Cannot map QE %d's MACE registers.\n", i);
			res = -ENODEV;
			goto qec_free_devs;
		}

		qeps[i]->qe_block = sbus_alloc_consistent(qesdevs[i],
							  PAGE_SIZE,
							  &qeps[i]->qblock_dvma);
		qeps[i]->buffers = sbus_alloc_consistent(qesdevs[i],
							 sizeof(struct sunqe_buffers),
							 &qeps[i]->buffers_dvma);
		if (qeps[i]->qe_block == NULL ||
		    qeps[i]->qblock_dvma == 0 ||
		    qeps[i]->buffers == NULL ||
		    qeps[i]->buffers_dvma == 0) {
			res = -ENODEV;
			goto qec_free_devs;
		}

		/* Stop this QE. */
		qe_stop(qeps[i]);
	}

	for (i = 0; i < 4; i++) {
		SET_MODULE_OWNER(qe_devs[i]);
		qe_devs[i]->open = qe_open;
		qe_devs[i]->stop = qe_close;
		qe_devs[i]->hard_start_xmit = qe_start_xmit;
		qe_devs[i]->get_stats = qe_get_stats;
		qe_devs[i]->set_multicast_list = qe_set_multicast;
		qe_devs[i]->tx_timeout = qe_tx_timeout;
		qe_devs[i]->watchdog_timeo = 5 * HZ;
		qe_devs[i]->irq = sdev->irqs[0];
		qe_devs[i]->dma = 0;
		ether_setup(qe_devs[i]);
	}

	/* QEC receives interrupts from each QE, then it sends the actual
	 * IRQ to the cpu itself.  Since QEC is the single point of
	 * interrupt for all QE channels we register the IRQ handler
	 * for it now.
	 */
	if (request_irq(sdev->irqs[0], &qec_interrupt,
			SA_SHIRQ, "QuadEther", (void *) qecp)) {
		printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
		res = -EAGAIN;
		goto qec_free_devs;
	}

	/* Report the QE channels. */
	for (i = 0; i < 4; i++) {
		printk(KERN_INFO "%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
		for (j = 0; j < 6; j++)
			printk("%2.2x%c",
			       qe_devs[i]->dev_addr[j],
			       j == 5 ? ' ' : ':');
		printk("\n");
	}

	/* We are home free at this point, link the qe's into
	 * the master list for later driver exit.
	 */
	for (i = 0; i < 4; i++)
		qe_devs[i]->ifindex = dev_new_index();
	qecp->next_module = root_qec_dev;
	root_qec_dev = qecp;

	return 0;

qec_free_devs:
	for (i = 0; i < 4; i++) {
		if (qe_devs[i] != NULL) {
			if (qe_devs[i]->priv) {
				struct sunqe *qe = (struct sunqe *) qe_devs[i]->priv;

				if (qe->qcregs)
					sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
				if (qe->mregs)
					sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
				if (qe->qe_block != NULL)
					sbus_free_consistent(qe->qe_sdev,
							     PAGE_SIZE,
							     qe->qe_block,
							     qe->qblock_dvma);
				if (qe->buffers != NULL)
					sbus_free_consistent(qe->qe_sdev,
							     sizeof(struct sunqe_buffers),
							     qe->buffers,
							     qe->buffers_dvma);
				kfree(qe_devs[i]->priv);
			}
			kfree(qe_devs[i]);
		}
	}
	if (qecp != NULL) {
		if (qecp->gregs)
			sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
		kfree(qecp);
	}
	return res;
}

static int __init qec_match(struct sbus_dev *sdev)
{
	struct sbus_dev *sibling;
	int i;

	if (strcmp(sdev->prom_name, "qec") != 0)
		return 0;

	/* QEC can be parent of either QuadEthernet or BigMAC
	 * children.  Do not confuse this with qfe/SUNW,qfe
	 * which is a quad-happymeal card and handled by
	 * a different driver.
	 */
	sibling = sdev->child;
	for (i = 0; i < 4; i++) {
		if (sibling == NULL)
			return 0;
		if (strcmp(sibling->prom_name, "qe") != 0)
			return 0;
		sibling = sibling->next;
	}
	return 1;
}

static int __init qec_probe(void)
{
	struct net_device *dev = NULL;
	struct sbus_bus *bus;
	struct sbus_dev *sdev = NULL;
	static int called;
	int cards = 0, v;

	root_qec_dev = NULL;

	if (called)
		return -ENODEV;
	called++;

	for_each_sbus(bus) {
		for_each_sbusdev(sdev, bus) {
			if (cards)
				dev = NULL;

			if (qec_match(sdev)) {
				cards++;
				if ((v = qec_ether_init(dev, sdev)))
					return v;
			}
		}
	}
	if (!cards)
		return -ENODEV;
	return 0;
}
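/* Module unload: tear down in the reverse order of qec_ether_init(),
 * releasing each channel's netdev, register mappings and DVMA memory
 * before the parent QEC's irq and global registers.
 */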
static void __exit qec_cleanup(void)
{
	struct sunqec *next_qec;
	int i;

	while (root_qec_dev) {
		next_qec = root_qec_dev->next_module;

		/* Release all four QE channels, then the QEC itself. */
		for (i = 0; i < 4; i++) {
			unregister_netdev(root_qec_dev->qes[i]->dev);
			sbus_iounmap(root_qec_dev->qes[i]->qcregs, CREG_REG_SIZE);
			sbus_iounmap(root_qec_dev->qes[i]->mregs, MREGS_REG_SIZE);
			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
					     PAGE_SIZE,
					     root_qec_dev->qes[i]->qe_block,
					     root_qec_dev->qes[i]->qblock_dvma);
			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
					     sizeof(struct sunqe_buffers),
					     root_qec_dev->qes[i]->buffers,
					     root_qec_dev->qes[i]->buffers_dvma);
			kfree(root_qec_dev->qes[i]->dev);
		}
		free_irq(root_qec_dev->qec_sdev->irqs[0], (void *) root_qec_dev);
		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
		kfree(root_qec_dev);
		root_qec_dev = next_qec;
	}
}

module_init(qec_probe);
module_exit(qec_cleanup);
MODULE_LICENSE("GPL");