/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar:  AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
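 *
 * Illustration only (not driver code): a descriptor ring as described
 * above is typically walked like this, with a WRAP flag in the last
 * descriptor sending the consumer back to the base of the ring:
 *
 *	bdp = ring_base;
 *	while (descriptor_has_data(bdp)) {
 *		process(bdp);
 *		bdp = (bdp->status & WRAP) ? ring_base : bdp + 1;
 *	}
 *
 * (descriptor_has_data() and WRAP are placeholders for the RXBD_*/TXBD_*
 * status bits used by the real code below.)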
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
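/*
 * Note (added commentary, not from the original sources): the eieio()
 * barrier in gfar_init_rxbdp() below orders the bufPtr store before the
 * lstatus store, so the controller can never see a descriptor marked
 * empty whose buffer pointer is still stale.
 */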
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
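/*
 * Commentary on gfar_alloc_skb_resources() below: all TX and RX rings
 * share one dma_alloc_coherent() region, laid out roughly as
 *
 *	[ txbd8 ring 0 | ... | txbd8 ring N-1 | rxbd8 ring 0 | ... ]
 *
 * which is why a single dma_free_coherent() on the first TX ring's base
 * address releases everything (see free_skb_resources()).
 */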
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct netdev_queue *txq;
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		tx_bytes += txq->tx_bytes;
		tx_packets += txq->tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
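/*
 * Commentary on the helpers above: their callers in this file (see
 * stop_gfar() and gfar_suspend()) disable local interrupts first, then
 * take the tx queue locks, then the rx queue locks, and release in the
 * opposite order -- keep that ordering to avoid deadlocks between
 * queues.
 */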
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
		irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
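/*
 * Sketch of the device-tree node consumed by gfar_parse_group() above
 * and gfar_of_init() below (property names taken from the code; unit
 * address and values are made-up examples):
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		phy-handle = <&phy0>;
 *		phy-connection-type = "rgmii-id";
 *	};
 */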
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
				GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
				GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
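/*
 * Sketch of the matching user-space call for the handler above
 * (illustration only; error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */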
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
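/*
 * Commentary: after gfar_init_filer_table() runs, the filer holds a
 * match-all default rule at index MAX_FILER_IDX, a four-entry cluster
 * for each of the six IPv4/IPv6 x TCP/UDP classes directly below it,
 * and no-match rules in every remaining slot; cur_filer_idx points at
 * the lowest programmed cluster entry.
 */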
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
			       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* enable filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/*
	 * Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/*
	 * Read the eTSEC register at offset 0xD1C.  If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				(IEVENT_GRSC | IEVENT_GTSC)) ==
				(IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
				j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					rxbdp->bufPtr, priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;

}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
		struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
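
/*
 * Worked example for the ring-walk helpers above: with ring_size == 8,
 * skip_txbd(base + 6, 3, base, 8) would step past the end of the ring,
 * so it wraps and returns base + 1.
 */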

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0, do_tstamp = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, length;
	union skb_shared_tx *shtx;

	/*
	 * TOE=1 frames larger than 2500 bytes may see excess delays
	 * before start of transmission.
	 */
	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
			skb->ip_summed == CHECKSUM_PARTIAL &&
			skb->len > 2500)) {
		int ret;

		ret = skb_checksum_help(skb);
		if (ret)
			return ret;
	}

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;
	shtx = skb_tx(skb);

	/* check if time stamp should be generated */
	if (unlikely(shtx->hardware && priv->hwts_tx_en))
		do_tstamp = 1;

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
			(priv->vlgrp && vlan_tx_tag_present(skb)) ||
			unlikely(do_tstamp)) &&
			(skb_headroom(skb) < GMAC_FCB_LEN)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;
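
	/*
	 * For example: a 3-fragment skb needs 4 TxBDs (one for the
	 * linear part plus one per fragment), or 5 when a hardware
	 * timestamp has been requested.
	 */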

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	txq->tx_bytes += skb->len;
	txq->tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
				tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
					TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&priv->ofdev->dev,
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		shtx->in_progress = 1;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	/*
	 * If time stamping is requested, one additional TxBD must be set up.
	 * The first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
				(skb_headlen(skb) - GMAC_FCB_LEN);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	/*
	 * We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we read num_txbdfree and checked for available space;
	 * that's because outside of this function it can only grow, and
	 * once we've got the needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting the ready bit for the
	 * first BD to be transmitted.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= nr_txbds;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full. We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;
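
	/*
	 * For example, assuming INCREMENTAL_BUFFER_SIZE is 512 (its usual
	 * value in gianfar.h): a frame_size of 1518 yields a 1536-byte
	 * buffer, and an exact multiple such as 1536 still rounds up,
	 * to 2048.
	 */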

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
			gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
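	/* For example, assuming RXBUF_ALIGNMENT is 64 (its usual value):
	 * if skb->data ends in 0x02, 62 bytes are reserved; a buffer that
	 * is already aligned is still shifted by a full 64 bytes. */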
2463 */ 2464 shtx = skb_tx(skb); 2465 if (unlikely(shtx->in_progress)) 2466 nr_txbds = frags + 2; 2467 else 2468 nr_txbds = frags + 1; 2469 2470 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); 2471 2472 lstatus = lbdp->lstatus; 2473 2474 /* Only clean completed frames */ 2475 if ((lstatus & BD_LFLAG(TXBD_READY)) && 2476 (lstatus & BD_LENGTH_MASK)) 2477 break; 2478 2479 if (unlikely(shtx->in_progress)) { 2480 next = next_txbd(bdp, base, tx_ring_size); 2481 buflen = next->length + GMAC_FCB_LEN; 2482 } else 2483 buflen = bdp->length; 2484 2485 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2486 buflen, DMA_TO_DEVICE); 2487 2488 if (unlikely(shtx->in_progress)) { 2489 struct skb_shared_hwtstamps shhwtstamps; 2490 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); 2491 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2492 shhwtstamps.hwtstamp = ns_to_ktime(*ns); 2493 skb_tstamp_tx(skb, &shhwtstamps); 2494 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2495 bdp = next; 2496 } 2497 2498 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2499 bdp = next_txbd(bdp, base, tx_ring_size); 2500 2501 for (i = 0; i < frags; i++) { 2502 dma_unmap_page(&priv->ofdev->dev, 2503 bdp->bufPtr, 2504 bdp->length, 2505 DMA_TO_DEVICE); 2506 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2507 bdp = next_txbd(bdp, base, tx_ring_size); 2508 } 2509 2510 /* 2511 * If there's room in the queue (limit it to rx_buffer_size) 2512 * we add this skb back into the pool, if it's the right size 2513 */ 2514 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && 2515 skb_recycle_check(skb, priv->rx_buffer_size + 2516 RXBUF_ALIGNMENT)) { 2517 gfar_align_skb(skb); 2518 __skb_queue_head(&priv->rx_recycle, skb); 2519 } else 2520 dev_kfree_skb_any(skb); 2521 2522 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2523 2524 skb_dirtytx = (skb_dirtytx + 1) & 2525 TX_RING_MOD_MASK(tx_ring_size); 2526 2527 howmany++; 2528 spin_lock_irqsave(&tx_queue->txlock, flags); 2529 tx_queue->num_txbdfree += nr_txbds; 2530 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2531 } 2532 2533 /* If we freed a buffer, we can restart transmission, if necessary */ 2534 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) 2535 netif_wake_subqueue(dev, tx_queue->qindex); 2536 2537 /* Update dirty indicators */ 2538 tx_queue->skb_dirtytx = skb_dirtytx; 2539 tx_queue->dirty_tx = bdp; 2540 2541 return howmany; 2542} 2543 2544static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2545{ 2546 unsigned long flags; 2547 2548 spin_lock_irqsave(&gfargrp->grplock, flags); 2549 if (napi_schedule_prep(&gfargrp->napi)) { 2550 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2551 __napi_schedule(&gfargrp->napi); 2552 } else { 2553 /* 2554 * Clear IEVENT, so interrupts aren't called again 2555 * because of the packets that have already arrived. 
2556 */ 2557 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2558 } 2559 spin_unlock_irqrestore(&gfargrp->grplock, flags); 2560 2561} 2562 2563/* Interrupt Handler for Transmit complete */ 2564static irqreturn_t gfar_transmit(int irq, void *grp_id) 2565{ 2566 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2567 return IRQ_HANDLED; 2568} 2569 2570static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2571 struct sk_buff *skb) 2572{ 2573 struct net_device *dev = rx_queue->dev; 2574 struct gfar_private *priv = netdev_priv(dev); 2575 dma_addr_t buf; 2576 2577 buf = dma_map_single(&priv->ofdev->dev, skb->data, 2578 priv->rx_buffer_size, DMA_FROM_DEVICE); 2579 gfar_init_rxbdp(rx_queue, bdp, buf); 2580} 2581 2582static struct sk_buff * gfar_alloc_skb(struct net_device *dev) 2583{ 2584 struct gfar_private *priv = netdev_priv(dev); 2585 struct sk_buff *skb = NULL; 2586 2587 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); 2588 if (!skb) 2589 return NULL; 2590 2591 gfar_align_skb(skb); 2592 2593 return skb; 2594} 2595 2596struct sk_buff * gfar_new_skb(struct net_device *dev) 2597{ 2598 struct gfar_private *priv = netdev_priv(dev); 2599 struct sk_buff *skb = NULL; 2600 2601 skb = __skb_dequeue(&priv->rx_recycle); 2602 if (!skb) 2603 skb = gfar_alloc_skb(dev); 2604 2605 return skb; 2606} 2607 2608static inline void count_errors(unsigned short status, struct net_device *dev) 2609{ 2610 struct gfar_private *priv = netdev_priv(dev); 2611 struct net_device_stats *stats = &dev->stats; 2612 struct gfar_extra_stats *estats = &priv->extra_stats; 2613 2614 /* If the packet was truncated, none of the other errors 2615 * matter */ 2616 if (status & RXBD_TRUNCATED) { 2617 stats->rx_length_errors++; 2618 2619 estats->rx_trunc++; 2620 2621 return; 2622 } 2623 /* Count the errors, if there were any */ 2624 if (status & (RXBD_LARGE | RXBD_SHORT)) { 2625 stats->rx_length_errors++; 2626 2627 if (status & RXBD_LARGE) 2628 estats->rx_large++; 2629 else 2630 estats->rx_short++; 2631 } 2632 if (status & RXBD_NONOCTET) { 2633 stats->rx_frame_errors++; 2634 estats->rx_nonoctet++; 2635 } 2636 if (status & RXBD_CRCERR) { 2637 estats->rx_crcerr++; 2638 stats->rx_crc_errors++; 2639 } 2640 if (status & RXBD_OVERRUN) { 2641 estats->rx_overrun++; 2642 stats->rx_crc_errors++; 2643 } 2644} 2645 2646irqreturn_t gfar_receive(int irq, void *grp_id) 2647{ 2648 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2649 return IRQ_HANDLED; 2650} 2651 2652static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 2653{ 2654 /* If valid headers were found, and valid sums 2655 * were verified, then we tell the kernel that no 2656 * checksumming is necessary. Otherwise, it is */ 2657 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2658 skb->ip_summed = CHECKSUM_UNNECESSARY; 2659 else 2660 skb->ip_summed = CHECKSUM_NONE; 2661} 2662 2663 2664/* gfar_process_frame() -- handle one incoming packet if skb 2665 * isn't NULL. 

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.
 */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
				bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				__skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
						"%s: Missing skb!\n", dev->name);
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
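
/*
 * Per the NAPI contract, returning the full budget keeps this poll
 * scheduled; only when less than the budget was consumed may we call
 * napi_complete() and re-enable the interrupts.
 */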

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue -
						rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
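
/*
 * Note: gfar_interrupt() itself never writes IEVENT; the RX/TX bits are
 * cleared in gfar_schedule_cleanup()/gfar_poll(), and the error bits in
 * gfar_error().
 */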

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack! Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index indicate which gaddr register to use, and the 5 other
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
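
/*
 * Worked example, assuming hash_width == 8 (the non-extended-hash
 * case): an ether_crc() result of 0x73xxxxxx hashes to index 0x73, so
 * whichreg = 0x73 >> 5 = 3 (the fourth hash register) and
 * whichbit = 0x73 & 0x1f = 19, i.e. bit 1 << (31 - 19) is set.
 */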

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
			(events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
			dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
					"packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
				dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] = {
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);