1/* 2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 4 * 5 * Based on the 64360 driver from: 6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> 7 * Rabeeh Khoury <rabeeh@marvell.com> 8 * 9 * Copyright (C) 2003 PMC-Sierra, Inc., 10 * written by Manish Lachwani 11 * 12 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> 13 * 14 * Copyright (C) 2004-2006 MontaVista Software, Inc. 15 * Dale Farnsworth <dale@farnsworth.org> 16 * 17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> 18 * <sjhill@realitydiluted.com> 19 * 20 * Copyright (C) 2007-2008 Marvell Semiconductor 21 * Lennert Buytenhek <buytenh@marvell.com> 22 * 23 * This program is free software; you can redistribute it and/or 24 * modify it under the terms of the GNU General Public License 25 * as published by the Free Software Foundation; either version 2 26 * of the License, or (at your option) any later version. 27 * 28 * This program is distributed in the hope that it will be useful, 29 * but WITHOUT ANY WARRANTY; without even the implied warranty of 30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 * GNU General Public License for more details. 32 * 33 * You should have received a copy of the GNU General Public License 34 * along with this program; if not, write to the Free Software 35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 36 */ 37 38#include <linux/init.h> 39#include <linux/dma-mapping.h> 40#include <linux/in.h> 41#include <linux/ip.h> 42#include <linux/tcp.h> 43#include <linux/udp.h> 44#include <linux/etherdevice.h> 45#include <linux/delay.h> 46#include <linux/ethtool.h> 47#include <linux/platform_device.h> 48#include <linux/module.h> 49#include <linux/kernel.h> 50#include <linux/spinlock.h> 51#include <linux/workqueue.h> 52#include <linux/phy.h> 53#include <linux/mv643xx_eth.h> 54#include <linux/io.h> 55#include <linux/types.h> 56#include <linux/inet_lro.h> 57#include <linux/slab.h> 58#include <asm/system.h> 59 60static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 61static char mv643xx_eth_driver_version[] = "1.4"; 62 63 64/* 65 * Registers shared between all ports. 66 */ 67#define PHY_ADDR 0x0000 68#define SMI_REG 0x0004 69#define SMI_BUSY 0x10000000 70#define SMI_READ_VALID 0x08000000 71#define SMI_OPCODE_READ 0x04000000 72#define SMI_OPCODE_WRITE 0x00000000 73#define ERR_INT_CAUSE 0x0080 74#define ERR_INT_SMI_DONE 0x00000010 75#define ERR_INT_MASK 0x0084 76#define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 77#define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 78#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) 79#define WINDOW_BAR_ENABLE 0x0290 80#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) 81 82/* 83 * Main per-port registers. These live at offset 0x0400 for 84 * port #0, 0x0800 for port #1, and 0x0c00 for port #2. 85 */ 86#define PORT_CONFIG 0x0000 87#define UNICAST_PROMISCUOUS_MODE 0x00000001 88#define PORT_CONFIG_EXT 0x0004 89#define MAC_ADDR_LOW 0x0014 90#define MAC_ADDR_HIGH 0x0018 91#define SDMA_CONFIG 0x001c 92#define TX_BURST_SIZE_16_64BIT 0x01000000 93#define TX_BURST_SIZE_4_64BIT 0x00800000 94#define BLM_TX_NO_SWAP 0x00000020 95#define BLM_RX_NO_SWAP 0x00000010 96#define RX_BURST_SIZE_16_64BIT 0x00000008 97#define RX_BURST_SIZE_4_64BIT 0x00000004 98#define PORT_SERIAL_CONTROL 0x003c 99#define SET_MII_SPEED_TO_100 0x01000000 100#define SET_GMII_SPEED_TO_1000 0x00800000 101#define SET_FULL_DUPLEX_MODE 0x00200000 102#define MAX_RX_PACKET_9700BYTE 0x000a0000 103#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 104#define DO_NOT_FORCE_LINK_FAIL 0x00000400 105#define SERIAL_PORT_CONTROL_RESERVED 0x00000200 106#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 107#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 108#define FORCE_LINK_PASS 0x00000002 109#define SERIAL_PORT_ENABLE 0x00000001 110#define PORT_STATUS 0x0044 111#define TX_FIFO_EMPTY 0x00000400 112#define TX_IN_PROGRESS 0x00000080 113#define PORT_SPEED_MASK 0x00000030 114#define PORT_SPEED_1000 0x00000010 115#define PORT_SPEED_100 0x00000020 116#define PORT_SPEED_10 0x00000000 117#define FLOW_CONTROL_ENABLED 0x00000008 118#define FULL_DUPLEX 0x00000004 119#define LINK_UP 0x00000002 120#define TXQ_COMMAND 0x0048 121#define TXQ_FIX_PRIO_CONF 0x004c 122#define TX_BW_RATE 0x0050 123#define TX_BW_MTU 0x0058 124#define TX_BW_BURST 0x005c 125#define INT_CAUSE 0x0060 126#define INT_TX_END 0x07f80000 127#define INT_TX_END_0 0x00080000 128#define INT_RX 0x000003fc 129#define INT_RX_0 0x00000004 130#define INT_EXT 0x00000002 131#define INT_CAUSE_EXT 0x0064 132#define INT_EXT_LINK_PHY 0x00110000 133#define INT_EXT_TX 0x000000ff 134#define INT_MASK 0x0068 135#define INT_MASK_EXT 0x006c 136#define TX_FIFO_URGENT_THRESHOLD 0x0074 137#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc 138#define TX_BW_RATE_MOVED 0x00e0 139#define TX_BW_MTU_MOVED 0x00e8 140#define TX_BW_BURST_MOVED 0x00ec 141#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) 142#define RXQ_COMMAND 0x0280 143#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) 144#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) 145#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) 146#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) 147 148/* 149 * Misc per-port registers. 150 */ 151#define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) 152#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) 153#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) 154#define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) 155 156 157/* 158 * SDMA configuration register default value. 159 */ 160#if defined(__BIG_ENDIAN) 161#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 162 (RX_BURST_SIZE_4_64BIT | \ 163 TX_BURST_SIZE_4_64BIT) 164#elif defined(__LITTLE_ENDIAN) 165#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 166 (RX_BURST_SIZE_4_64BIT | \ 167 BLM_RX_NO_SWAP | \ 168 BLM_TX_NO_SWAP | \ 169 TX_BURST_SIZE_4_64BIT) 170#else 171#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 172#endif 173 174 175/* 176 * Misc definitions. 177 */ 178#define DEFAULT_RX_QUEUE_SIZE 128 179#define DEFAULT_TX_QUEUE_SIZE 256 180#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) 181 182 183/* 184 * RX/TX descriptors. 185 */ 186#if defined(__BIG_ENDIAN) 187struct rx_desc { 188 u16 byte_cnt; /* Descriptor buffer byte count */ 189 u16 buf_size; /* Buffer size */ 190 u32 cmd_sts; /* Descriptor command status */ 191 u32 next_desc_ptr; /* Next descriptor pointer */ 192 u32 buf_ptr; /* Descriptor buffer pointer */ 193}; 194 195struct tx_desc { 196 u16 byte_cnt; /* buffer byte count */ 197 u16 l4i_chk; /* CPU provided TCP checksum */ 198 u32 cmd_sts; /* Command/status field */ 199 u32 next_desc_ptr; /* Pointer to next descriptor */ 200 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 201}; 202#elif defined(__LITTLE_ENDIAN) 203struct rx_desc { 204 u32 cmd_sts; /* Descriptor command status */ 205 u16 buf_size; /* Buffer size */ 206 u16 byte_cnt; /* Descriptor buffer byte count */ 207 u32 buf_ptr; /* Descriptor buffer pointer */ 208 u32 next_desc_ptr; /* Next descriptor pointer */ 209}; 210 211struct tx_desc { 212 u32 cmd_sts; /* Command/status field */ 213 u16 l4i_chk; /* CPU provided TCP checksum */ 214 u16 byte_cnt; /* buffer byte count */ 215 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 216 u32 next_desc_ptr; /* Pointer to next descriptor */ 217}; 218#else 219#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 220#endif 221 222/* RX & TX descriptor command */ 223#define BUFFER_OWNED_BY_DMA 0x80000000 224 225/* RX & TX descriptor status */ 226#define ERROR_SUMMARY 0x00000001 227 228/* RX descriptor status */ 229#define LAYER_4_CHECKSUM_OK 0x40000000 230#define RX_ENABLE_INTERRUPT 0x20000000 231#define RX_FIRST_DESC 0x08000000 232#define RX_LAST_DESC 0x04000000 233#define RX_IP_HDR_OK 0x02000000 234#define RX_PKT_IS_IPV4 0x01000000 235#define RX_PKT_IS_ETHERNETV2 0x00800000 236#define RX_PKT_LAYER4_TYPE_MASK 0x00600000 237#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 238#define RX_PKT_IS_VLAN_TAGGED 0x00080000 239 240/* TX descriptor command */ 241#define TX_ENABLE_INTERRUPT 0x00800000 242#define GEN_CRC 0x00400000 243#define TX_FIRST_DESC 0x00200000 244#define TX_LAST_DESC 0x00100000 245#define ZERO_PADDING 0x00080000 246#define GEN_IP_V4_CHECKSUM 0x00040000 247#define GEN_TCP_UDP_CHECKSUM 0x00020000 248#define UDP_FRAME 0x00010000 249#define MAC_HDR_EXTRA_4_BYTES 0x00008000 250#define MAC_HDR_EXTRA_8_BYTES 0x00000200 251 252#define TX_IHL_SHIFT 11 253 254 255/* global *******************************************************************/ 256struct mv643xx_eth_shared_private { 257 /* 258 * Ethernet controller base address. 259 */ 260 void __iomem *base; 261 262 /* 263 * Points at the right SMI instance to use. 264 */ 265 struct mv643xx_eth_shared_private *smi; 266 267 /* 268 * Provides access to local SMI interface. 269 */ 270 struct mii_bus *smi_bus; 271 272 /* 273 * If we have access to the error interrupt pin (which is 274 * somewhat misnamed as it not only reflects internal errors 275 * but also reflects SMI completion), use that to wait for 276 * SMI access completion instead of polling the SMI busy bit. 277 */ 278 int err_interrupt; 279 wait_queue_head_t smi_busy_wait; 280 281 /* 282 * Per-port MBUS window access register value. 283 */ 284 u32 win_protect; 285 286 /* 287 * Hardware-specific parameters. 288 */ 289 unsigned int t_clk; 290 int extended_rx_coal_limit; 291 int tx_bw_control; 292 int tx_csum_limit; 293}; 294 295#define TX_BW_CONTROL_ABSENT 0 296#define TX_BW_CONTROL_OLD_LAYOUT 1 297#define TX_BW_CONTROL_NEW_LAYOUT 2 298 299static int mv643xx_eth_open(struct net_device *dev); 300static int mv643xx_eth_stop(struct net_device *dev); 301 302 303/* per-port *****************************************************************/ 304struct mib_counters { 305 u64 good_octets_received; 306 u32 bad_octets_received; 307 u32 internal_mac_transmit_err; 308 u32 good_frames_received; 309 u32 bad_frames_received; 310 u32 broadcast_frames_received; 311 u32 multicast_frames_received; 312 u32 frames_64_octets; 313 u32 frames_65_to_127_octets; 314 u32 frames_128_to_255_octets; 315 u32 frames_256_to_511_octets; 316 u32 frames_512_to_1023_octets; 317 u32 frames_1024_to_max_octets; 318 u64 good_octets_sent; 319 u32 good_frames_sent; 320 u32 excessive_collision; 321 u32 multicast_frames_sent; 322 u32 broadcast_frames_sent; 323 u32 unrec_mac_control_received; 324 u32 fc_sent; 325 u32 good_fc_received; 326 u32 bad_fc_received; 327 u32 undersize_received; 328 u32 fragments_received; 329 u32 oversize_received; 330 u32 jabber_received; 331 u32 mac_receive_error; 332 u32 bad_crc_event; 333 u32 collision; 334 u32 late_collision; 335}; 336 337struct lro_counters { 338 u32 lro_aggregated; 339 u32 lro_flushed; 340 u32 lro_no_desc; 341}; 342 343struct rx_queue { 344 int index; 345 346 int rx_ring_size; 347 348 int rx_desc_count; 349 int rx_curr_desc; 350 int rx_used_desc; 351 352 struct rx_desc *rx_desc_area; 353 dma_addr_t rx_desc_dma; 354 int rx_desc_area_size; 355 struct sk_buff **rx_skb; 356 357 struct net_lro_mgr lro_mgr; 358 struct net_lro_desc lro_arr[8]; 359}; 360 361struct tx_queue { 362 int index; 363 364 int tx_ring_size; 365 366 int tx_desc_count; 367 int tx_curr_desc; 368 int tx_used_desc; 369 370 struct tx_desc *tx_desc_area; 371 dma_addr_t tx_desc_dma; 372 int tx_desc_area_size; 373 374 struct sk_buff_head tx_skb; 375 376 unsigned long tx_packets; 377 unsigned long tx_bytes; 378 unsigned long tx_dropped; 379}; 380 381struct mv643xx_eth_private { 382 struct mv643xx_eth_shared_private *shared; 383 void __iomem *base; 384 int port_num; 385 386 struct net_device *dev; 387 388 struct phy_device *phy; 389 390 struct timer_list mib_counters_timer; 391 spinlock_t mib_counters_lock; 392 struct mib_counters mib_counters; 393 394 struct lro_counters lro_counters; 395 396 struct work_struct tx_timeout_task; 397 398 struct napi_struct napi; 399 u32 int_mask; 400 u8 oom; 401 u8 work_link; 402 u8 work_tx; 403 u8 work_tx_end; 404 u8 work_rx; 405 u8 work_rx_refill; 406 407 int skb_size; 408 struct sk_buff_head rx_recycle; 409 410 /* 411 * RX state. 412 */ 413 int rx_ring_size; 414 unsigned long rx_desc_sram_addr; 415 int rx_desc_sram_size; 416 int rxq_count; 417 struct timer_list rx_oom; 418 struct rx_queue rxq[8]; 419 420 /* 421 * TX state. 422 */ 423 int tx_ring_size; 424 unsigned long tx_desc_sram_addr; 425 int tx_desc_sram_size; 426 int txq_count; 427 struct tx_queue txq[8]; 428}; 429 430 431/* port register accessors **************************************************/ 432static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) 433{ 434 return readl(mp->shared->base + offset); 435} 436 437static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) 438{ 439 return readl(mp->base + offset); 440} 441 442static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) 443{ 444 writel(data, mp->shared->base + offset); 445} 446 447static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) 448{ 449 writel(data, mp->base + offset); 450} 451 452 453/* rxq/txq helper functions *************************************************/ 454static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) 455{ 456 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); 457} 458 459static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) 460{ 461 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); 462} 463 464static void rxq_enable(struct rx_queue *rxq) 465{ 466 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 467 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); 468} 469 470static void rxq_disable(struct rx_queue *rxq) 471{ 472 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 473 u8 mask = 1 << rxq->index; 474 475 wrlp(mp, RXQ_COMMAND, mask << 8); 476 while (rdlp(mp, RXQ_COMMAND) & mask) 477 udelay(10); 478} 479 480static void txq_reset_hw_ptr(struct tx_queue *txq) 481{ 482 struct mv643xx_eth_private *mp = txq_to_mp(txq); 483 u32 addr; 484 485 addr = (u32)txq->tx_desc_dma; 486 addr += txq->tx_curr_desc * sizeof(struct tx_desc); 487 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); 488} 489 490static void txq_enable(struct tx_queue *txq) 491{ 492 struct mv643xx_eth_private *mp = txq_to_mp(txq); 493 wrlp(mp, TXQ_COMMAND, 1 << txq->index); 494} 495 496static void txq_disable(struct tx_queue *txq) 497{ 498 struct mv643xx_eth_private *mp = txq_to_mp(txq); 499 u8 mask = 1 << txq->index; 500 501 wrlp(mp, TXQ_COMMAND, mask << 8); 502 while (rdlp(mp, TXQ_COMMAND) & mask) 503 udelay(10); 504} 505 506static void txq_maybe_wake(struct tx_queue *txq) 507{ 508 struct mv643xx_eth_private *mp = txq_to_mp(txq); 509 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 510 511 if (netif_tx_queue_stopped(nq)) { 512 __netif_tx_lock(nq, smp_processor_id()); 513 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) 514 netif_tx_wake_queue(nq); 515 __netif_tx_unlock(nq); 516 } 517} 518 519 520/* rx napi ******************************************************************/ 521static int 522mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, 523 u64 *hdr_flags, void *priv) 524{ 525 unsigned long cmd_sts = (unsigned long)priv; 526 527 /* 528 * Make sure that this packet is Ethernet II, is not VLAN 529 * tagged, is IPv4, has a valid IP header, and is TCP. 530 */ 531 if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | 532 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK | 533 RX_PKT_IS_VLAN_TAGGED)) != 534 (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | 535 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4)) 536 return -1; 537 538 skb_reset_network_header(skb); 539 skb_set_transport_header(skb, ip_hdrlen(skb)); 540 *iphdr = ip_hdr(skb); 541 *tcph = tcp_hdr(skb); 542 *hdr_flags = LRO_IPV4 | LRO_TCP; 543 544 return 0; 545} 546 547static int rxq_process(struct rx_queue *rxq, int budget) 548{ 549 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 550 struct net_device_stats *stats = &mp->dev->stats; 551 int lro_flush_needed; 552 int rx; 553 554 lro_flush_needed = 0; 555 rx = 0; 556 while (rx < budget && rxq->rx_desc_count) { 557 struct rx_desc *rx_desc; 558 unsigned int cmd_sts; 559 struct sk_buff *skb; 560 u16 byte_cnt; 561 562 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; 563 564 cmd_sts = rx_desc->cmd_sts; 565 if (cmd_sts & BUFFER_OWNED_BY_DMA) 566 break; 567 rmb(); 568 569 skb = rxq->rx_skb[rxq->rx_curr_desc]; 570 rxq->rx_skb[rxq->rx_curr_desc] = NULL; 571 572 rxq->rx_curr_desc++; 573 if (rxq->rx_curr_desc == rxq->rx_ring_size) 574 rxq->rx_curr_desc = 0; 575 576 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, 577 rx_desc->buf_size, DMA_FROM_DEVICE); 578 rxq->rx_desc_count--; 579 rx++; 580 581 mp->work_rx_refill |= 1 << rxq->index; 582 583 byte_cnt = rx_desc->byte_cnt; 584 585 /* 586 * Update statistics. 587 * 588 * Note that the descriptor byte count includes 2 dummy 589 * bytes automatically inserted by the hardware at the 590 * start of the packet (which we don't count), and a 4 591 * byte CRC at the end of the packet (which we do count). 592 */ 593 stats->rx_packets++; 594 stats->rx_bytes += byte_cnt - 2; 595 596 /* 597 * In case we received a packet without first / last bits 598 * on, or the error summary bit is set, the packet needs 599 * to be dropped. 600 */ 601 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) 602 != (RX_FIRST_DESC | RX_LAST_DESC)) 603 goto err; 604 605 /* 606 * The -4 is for the CRC in the trailer of the 607 * received packet 608 */ 609 skb_put(skb, byte_cnt - 2 - 4); 610 611 if (cmd_sts & LAYER_4_CHECKSUM_OK) 612 skb->ip_summed = CHECKSUM_UNNECESSARY; 613 skb->protocol = eth_type_trans(skb, mp->dev); 614 615 if (skb->dev->features & NETIF_F_LRO && 616 skb->ip_summed == CHECKSUM_UNNECESSARY) { 617 lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts); 618 lro_flush_needed = 1; 619 } else 620 netif_receive_skb(skb); 621 622 continue; 623 624err: 625 stats->rx_dropped++; 626 627 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 628 (RX_FIRST_DESC | RX_LAST_DESC)) { 629 if (net_ratelimit()) 630 dev_printk(KERN_ERR, &mp->dev->dev, 631 "received packet spanning " 632 "multiple descriptors\n"); 633 } 634 635 if (cmd_sts & ERROR_SUMMARY) 636 stats->rx_errors++; 637 638 dev_kfree_skb(skb); 639 } 640 641 if (lro_flush_needed) 642 lro_flush_all(&rxq->lro_mgr); 643 644 if (rx < budget) 645 mp->work_rx &= ~(1 << rxq->index); 646 647 return rx; 648} 649 650static int rxq_refill(struct rx_queue *rxq, int budget) 651{ 652 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 653 int refilled; 654 655 refilled = 0; 656 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { 657 struct sk_buff *skb; 658 int rx; 659 struct rx_desc *rx_desc; 660 int size; 661 662 skb = __skb_dequeue(&mp->rx_recycle); 663 if (skb == NULL) 664 skb = dev_alloc_skb(mp->skb_size); 665 666 if (skb == NULL) { 667 mp->oom = 1; 668 goto oom; 669 } 670 671 if (SKB_DMA_REALIGN) 672 skb_reserve(skb, SKB_DMA_REALIGN); 673 674 refilled++; 675 rxq->rx_desc_count++; 676 677 rx = rxq->rx_used_desc++; 678 if (rxq->rx_used_desc == rxq->rx_ring_size) 679 rxq->rx_used_desc = 0; 680 681 rx_desc = rxq->rx_desc_area + rx; 682 683 size = skb->end - skb->data; 684 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, 685 skb->data, size, 686 DMA_FROM_DEVICE); 687 rx_desc->buf_size = size; 688 rxq->rx_skb[rx] = skb; 689 wmb(); 690 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; 691 wmb(); 692 693 /* 694 * The hardware automatically prepends 2 bytes of 695 * dummy data to each received packet, so that the 696 * IP header ends up 16-byte aligned. 697 */ 698 skb_reserve(skb, 2); 699 } 700 701 if (refilled < budget) 702 mp->work_rx_refill &= ~(1 << rxq->index); 703 704oom: 705 return refilled; 706} 707 708 709/* tx ***********************************************************************/ 710static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) 711{ 712 int frag; 713 714 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 715 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 716 if (fragp->size <= 8 && fragp->page_offset & 7) 717 return 1; 718 } 719 720 return 0; 721} 722 723static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 724{ 725 struct mv643xx_eth_private *mp = txq_to_mp(txq); 726 int nr_frags = skb_shinfo(skb)->nr_frags; 727 int frag; 728 729 for (frag = 0; frag < nr_frags; frag++) { 730 skb_frag_t *this_frag; 731 int tx_index; 732 struct tx_desc *desc; 733 734 this_frag = &skb_shinfo(skb)->frags[frag]; 735 tx_index = txq->tx_curr_desc++; 736 if (txq->tx_curr_desc == txq->tx_ring_size) 737 txq->tx_curr_desc = 0; 738 desc = &txq->tx_desc_area[tx_index]; 739 740 /* 741 * The last fragment will generate an interrupt 742 * which will free the skb on TX completion. 743 */ 744 if (frag == nr_frags - 1) { 745 desc->cmd_sts = BUFFER_OWNED_BY_DMA | 746 ZERO_PADDING | TX_LAST_DESC | 747 TX_ENABLE_INTERRUPT; 748 } else { 749 desc->cmd_sts = BUFFER_OWNED_BY_DMA; 750 } 751 752 desc->l4i_chk = 0; 753 desc->byte_cnt = this_frag->size; 754 desc->buf_ptr = dma_map_page(mp->dev->dev.parent, 755 this_frag->page, 756 this_frag->page_offset, 757 this_frag->size, DMA_TO_DEVICE); 758 } 759} 760 761static inline __be16 sum16_as_be(__sum16 sum) 762{ 763 return (__force __be16)sum; 764} 765 766static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 767{ 768 struct mv643xx_eth_private *mp = txq_to_mp(txq); 769 int nr_frags = skb_shinfo(skb)->nr_frags; 770 int tx_index; 771 struct tx_desc *desc; 772 u32 cmd_sts; 773 u16 l4i_chk; 774 int length; 775 776 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 777 l4i_chk = 0; 778 779 if (skb->ip_summed == CHECKSUM_PARTIAL) { 780 int hdr_len; 781 int tag_bytes; 782 783 BUG_ON(skb->protocol != htons(ETH_P_IP) && 784 skb->protocol != htons(ETH_P_8021Q)); 785 786 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; 787 tag_bytes = hdr_len - ETH_HLEN; 788 if (skb->len - hdr_len > mp->shared->tx_csum_limit || 789 unlikely(tag_bytes & ~12)) { 790 if (skb_checksum_help(skb) == 0) 791 goto no_csum; 792 kfree_skb(skb); 793 return 1; 794 } 795 796 if (tag_bytes & 4) 797 cmd_sts |= MAC_HDR_EXTRA_4_BYTES; 798 if (tag_bytes & 8) 799 cmd_sts |= MAC_HDR_EXTRA_8_BYTES; 800 801 cmd_sts |= GEN_TCP_UDP_CHECKSUM | 802 GEN_IP_V4_CHECKSUM | 803 ip_hdr(skb)->ihl << TX_IHL_SHIFT; 804 805 switch (ip_hdr(skb)->protocol) { 806 case IPPROTO_UDP: 807 cmd_sts |= UDP_FRAME; 808 l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); 809 break; 810 case IPPROTO_TCP: 811 l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); 812 break; 813 default: 814 BUG(); 815 } 816 } else { 817no_csum: 818 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 819 cmd_sts |= 5 << TX_IHL_SHIFT; 820 } 821 822 tx_index = txq->tx_curr_desc++; 823 if (txq->tx_curr_desc == txq->tx_ring_size) 824 txq->tx_curr_desc = 0; 825 desc = &txq->tx_desc_area[tx_index]; 826 827 if (nr_frags) { 828 txq_submit_frag_skb(txq, skb); 829 length = skb_headlen(skb); 830 } else { 831 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; 832 length = skb->len; 833 } 834 835 desc->l4i_chk = l4i_chk; 836 desc->byte_cnt = length; 837 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, 838 length, DMA_TO_DEVICE); 839 840 __skb_queue_tail(&txq->tx_skb, skb); 841 842 /* ensure all other descriptors are written before first cmd_sts */ 843 wmb(); 844 desc->cmd_sts = cmd_sts; 845 846 /* clear TX_END status */ 847 mp->work_tx_end &= ~(1 << txq->index); 848 849 /* ensure all descriptors are written before poking hardware */ 850 wmb(); 851 txq_enable(txq); 852 853 txq->tx_desc_count += nr_frags + 1; 854 855 return 0; 856} 857 858static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 859{ 860 struct mv643xx_eth_private *mp = netdev_priv(dev); 861 int queue; 862 struct tx_queue *txq; 863 struct netdev_queue *nq; 864 865 queue = skb_get_queue_mapping(skb); 866 txq = mp->txq + queue; 867 nq = netdev_get_tx_queue(dev, queue); 868 869 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 870 txq->tx_dropped++; 871 dev_printk(KERN_DEBUG, &dev->dev, 872 "failed to linearize skb with tiny " 873 "unaligned fragment\n"); 874 return NETDEV_TX_BUSY; 875 } 876 877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 878 if (net_ratelimit()) 879 dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); 880 kfree_skb(skb); 881 return NETDEV_TX_OK; 882 } 883 884 if (!txq_submit_skb(txq, skb)) { 885 int entries_left; 886 887 txq->tx_bytes += skb->len; 888 txq->tx_packets++; 889 890 entries_left = txq->tx_ring_size - txq->tx_desc_count; 891 if (entries_left < MAX_SKB_FRAGS + 1) 892 netif_tx_stop_queue(nq); 893 } 894 895 return NETDEV_TX_OK; 896} 897 898 899/* tx napi ******************************************************************/ 900static void txq_kick(struct tx_queue *txq) 901{ 902 struct mv643xx_eth_private *mp = txq_to_mp(txq); 903 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 904 u32 hw_desc_ptr; 905 u32 expected_ptr; 906 907 __netif_tx_lock(nq, smp_processor_id()); 908 909 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) 910 goto out; 911 912 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); 913 expected_ptr = (u32)txq->tx_desc_dma + 914 txq->tx_curr_desc * sizeof(struct tx_desc); 915 916 if (hw_desc_ptr != expected_ptr) 917 txq_enable(txq); 918 919out: 920 __netif_tx_unlock(nq); 921 922 mp->work_tx_end &= ~(1 << txq->index); 923} 924 925static int txq_reclaim(struct tx_queue *txq, int budget, int force) 926{ 927 struct mv643xx_eth_private *mp = txq_to_mp(txq); 928 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 929 int reclaimed; 930 931 __netif_tx_lock(nq, smp_processor_id()); 932 933 reclaimed = 0; 934 while (reclaimed < budget && txq->tx_desc_count > 0) { 935 int tx_index; 936 struct tx_desc *desc; 937 u32 cmd_sts; 938 struct sk_buff *skb; 939 940 tx_index = txq->tx_used_desc; 941 desc = &txq->tx_desc_area[tx_index]; 942 cmd_sts = desc->cmd_sts; 943 944 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 945 if (!force) 946 break; 947 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; 948 } 949 950 txq->tx_used_desc = tx_index + 1; 951 if (txq->tx_used_desc == txq->tx_ring_size) 952 txq->tx_used_desc = 0; 953 954 reclaimed++; 955 txq->tx_desc_count--; 956 957 skb = NULL; 958 if (cmd_sts & TX_LAST_DESC) 959 skb = __skb_dequeue(&txq->tx_skb); 960 961 if (cmd_sts & ERROR_SUMMARY) { 962 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 963 mp->dev->stats.tx_errors++; 964 } 965 966 if (cmd_sts & TX_FIRST_DESC) { 967 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 968 desc->byte_cnt, DMA_TO_DEVICE); 969 } else { 970 dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, 971 desc->byte_cnt, DMA_TO_DEVICE); 972 } 973 974 if (skb != NULL) { 975 if (skb_queue_len(&mp->rx_recycle) < 976 mp->rx_ring_size && 977 skb_recycle_check(skb, mp->skb_size)) 978 __skb_queue_head(&mp->rx_recycle, skb); 979 else 980 dev_kfree_skb(skb); 981 } 982 } 983 984 __netif_tx_unlock(nq); 985 986 if (reclaimed < budget) 987 mp->work_tx &= ~(1 << txq->index); 988 989 return reclaimed; 990} 991 992 993/* tx rate control **********************************************************/ 994/* 995 * Set total maximum TX rate (shared by all TX queues for this port) 996 * to 'rate' bits per second, with a maximum burst of 'burst' bytes. 997 */ 998static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) 999{ 1000 int token_rate; 1001 int mtu; 1002 int bucket_size; 1003 1004 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1005 if (token_rate > 1023) 1006 token_rate = 1023; 1007 1008 mtu = (mp->dev->mtu + 255) >> 8; 1009 if (mtu > 63) 1010 mtu = 63; 1011 1012 bucket_size = (burst + 255) >> 8; 1013 if (bucket_size > 65535) 1014 bucket_size = 65535; 1015 1016 switch (mp->shared->tx_bw_control) { 1017 case TX_BW_CONTROL_OLD_LAYOUT: 1018 wrlp(mp, TX_BW_RATE, token_rate); 1019 wrlp(mp, TX_BW_MTU, mtu); 1020 wrlp(mp, TX_BW_BURST, bucket_size); 1021 break; 1022 case TX_BW_CONTROL_NEW_LAYOUT: 1023 wrlp(mp, TX_BW_RATE_MOVED, token_rate); 1024 wrlp(mp, TX_BW_MTU_MOVED, mtu); 1025 wrlp(mp, TX_BW_BURST_MOVED, bucket_size); 1026 break; 1027 } 1028} 1029 1030static void txq_set_rate(struct tx_queue *txq, int rate, int burst) 1031{ 1032 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1033 int token_rate; 1034 int bucket_size; 1035 1036 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1037 if (token_rate > 1023) 1038 token_rate = 1023; 1039 1040 bucket_size = (burst + 255) >> 8; 1041 if (bucket_size > 65535) 1042 bucket_size = 65535; 1043 1044 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); 1045 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); 1046} 1047 1048static void txq_set_fixed_prio_mode(struct tx_queue *txq) 1049{ 1050 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1051 int off; 1052 u32 val; 1053 1054 /* 1055 * Turn on fixed priority mode. 1056 */ 1057 off = 0; 1058 switch (mp->shared->tx_bw_control) { 1059 case TX_BW_CONTROL_OLD_LAYOUT: 1060 off = TXQ_FIX_PRIO_CONF; 1061 break; 1062 case TX_BW_CONTROL_NEW_LAYOUT: 1063 off = TXQ_FIX_PRIO_CONF_MOVED; 1064 break; 1065 } 1066 1067 if (off) { 1068 val = rdlp(mp, off); 1069 val |= 1 << txq->index; 1070 wrlp(mp, off, val); 1071 } 1072} 1073 1074 1075/* mii management interface *************************************************/ 1076static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) 1077{ 1078 struct mv643xx_eth_shared_private *msp = dev_id; 1079 1080 if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { 1081 writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); 1082 wake_up(&msp->smi_busy_wait); 1083 return IRQ_HANDLED; 1084 } 1085 1086 return IRQ_NONE; 1087} 1088 1089static int smi_is_done(struct mv643xx_eth_shared_private *msp) 1090{ 1091 return !(readl(msp->base + SMI_REG) & SMI_BUSY); 1092} 1093 1094static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) 1095{ 1096 if (msp->err_interrupt == NO_IRQ) { 1097 int i; 1098 1099 for (i = 0; !smi_is_done(msp); i++) { 1100 if (i == 10) 1101 return -ETIMEDOUT; 1102 msleep(10); 1103 } 1104 1105 return 0; 1106 } 1107 1108 if (!smi_is_done(msp)) { 1109 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), 1110 msecs_to_jiffies(100)); 1111 if (!smi_is_done(msp)) 1112 return -ETIMEDOUT; 1113 } 1114 1115 return 0; 1116} 1117 1118static int smi_bus_read(struct mii_bus *bus, int addr, int reg) 1119{ 1120 struct mv643xx_eth_shared_private *msp = bus->priv; 1121 void __iomem *smi_reg = msp->base + SMI_REG; 1122 int ret; 1123 1124 if (smi_wait_ready(msp)) { 1125 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1126 return -ETIMEDOUT; 1127 } 1128 1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1130 1131 if (smi_wait_ready(msp)) { 1132 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1133 return -ETIMEDOUT; 1134 } 1135 1136 ret = readl(smi_reg); 1137 if (!(ret & SMI_READ_VALID)) { 1138 printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n"); 1139 return -ENODEV; 1140 } 1141 1142 return ret & 0xffff; 1143} 1144 1145static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) 1146{ 1147 struct mv643xx_eth_shared_private *msp = bus->priv; 1148 void __iomem *smi_reg = msp->base + SMI_REG; 1149 1150 if (smi_wait_ready(msp)) { 1151 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1152 return -ETIMEDOUT; 1153 } 1154 1155 writel(SMI_OPCODE_WRITE | (reg << 21) | 1156 (addr << 16) | (val & 0xffff), smi_reg); 1157 1158 if (smi_wait_ready(msp)) { 1159 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1160 return -ETIMEDOUT; 1161 } 1162 1163 return 0; 1164} 1165 1166 1167/* statistics ***************************************************************/ 1168static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) 1169{ 1170 struct mv643xx_eth_private *mp = netdev_priv(dev); 1171 struct net_device_stats *stats = &dev->stats; 1172 unsigned long tx_packets = 0; 1173 unsigned long tx_bytes = 0; 1174 unsigned long tx_dropped = 0; 1175 int i; 1176 1177 for (i = 0; i < mp->txq_count; i++) { 1178 struct tx_queue *txq = mp->txq + i; 1179 1180 tx_packets += txq->tx_packets; 1181 tx_bytes += txq->tx_bytes; 1182 tx_dropped += txq->tx_dropped; 1183 } 1184 1185 stats->tx_packets = tx_packets; 1186 stats->tx_bytes = tx_bytes; 1187 stats->tx_dropped = tx_dropped; 1188 1189 return stats; 1190} 1191 1192static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp) 1193{ 1194 u32 lro_aggregated = 0; 1195 u32 lro_flushed = 0; 1196 u32 lro_no_desc = 0; 1197 int i; 1198 1199 for (i = 0; i < mp->rxq_count; i++) { 1200 struct rx_queue *rxq = mp->rxq + i; 1201 1202 lro_aggregated += rxq->lro_mgr.stats.aggregated; 1203 lro_flushed += rxq->lro_mgr.stats.flushed; 1204 lro_no_desc += rxq->lro_mgr.stats.no_desc; 1205 } 1206 1207 mp->lro_counters.lro_aggregated = lro_aggregated; 1208 mp->lro_counters.lro_flushed = lro_flushed; 1209 mp->lro_counters.lro_no_desc = lro_no_desc; 1210} 1211 1212static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1213{ 1214 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); 1215} 1216 1217static void mib_counters_clear(struct mv643xx_eth_private *mp) 1218{ 1219 int i; 1220 1221 for (i = 0; i < 0x80; i += 4) 1222 mib_read(mp, i); 1223} 1224 1225static void mib_counters_update(struct mv643xx_eth_private *mp) 1226{ 1227 struct mib_counters *p = &mp->mib_counters; 1228 1229 spin_lock_bh(&mp->mib_counters_lock); 1230 p->good_octets_received += mib_read(mp, 0x00); 1231 p->bad_octets_received += mib_read(mp, 0x08); 1232 p->internal_mac_transmit_err += mib_read(mp, 0x0c); 1233 p->good_frames_received += mib_read(mp, 0x10); 1234 p->bad_frames_received += mib_read(mp, 0x14); 1235 p->broadcast_frames_received += mib_read(mp, 0x18); 1236 p->multicast_frames_received += mib_read(mp, 0x1c); 1237 p->frames_64_octets += mib_read(mp, 0x20); 1238 p->frames_65_to_127_octets += mib_read(mp, 0x24); 1239 p->frames_128_to_255_octets += mib_read(mp, 0x28); 1240 p->frames_256_to_511_octets += mib_read(mp, 0x2c); 1241 p->frames_512_to_1023_octets += mib_read(mp, 0x30); 1242 p->frames_1024_to_max_octets += mib_read(mp, 0x34); 1243 p->good_octets_sent += mib_read(mp, 0x38); 1244 p->good_frames_sent += mib_read(mp, 0x40); 1245 p->excessive_collision += mib_read(mp, 0x44); 1246 p->multicast_frames_sent += mib_read(mp, 0x48); 1247 p->broadcast_frames_sent += mib_read(mp, 0x4c); 1248 p->unrec_mac_control_received += mib_read(mp, 0x50); 1249 p->fc_sent += mib_read(mp, 0x54); 1250 p->good_fc_received += mib_read(mp, 0x58); 1251 p->bad_fc_received += mib_read(mp, 0x5c); 1252 p->undersize_received += mib_read(mp, 0x60); 1253 p->fragments_received += mib_read(mp, 0x64); 1254 p->oversize_received += mib_read(mp, 0x68); 1255 p->jabber_received += mib_read(mp, 0x6c); 1256 p->mac_receive_error += mib_read(mp, 0x70); 1257 p->bad_crc_event += mib_read(mp, 0x74); 1258 p->collision += mib_read(mp, 0x78); 1259 p->late_collision += mib_read(mp, 0x7c); 1260 spin_unlock_bh(&mp->mib_counters_lock); 1261 1262 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1263} 1264 1265static void mib_counters_timer_wrapper(unsigned long _mp) 1266{ 1267 struct mv643xx_eth_private *mp = (void *)_mp; 1268 1269 mib_counters_update(mp); 1270} 1271 1272 1273/* interrupt coalescing *****************************************************/ 1274/* 1275 * Hardware coalescing parameters are set in units of 64 t_clk 1276 * cycles. I.e.: 1277 * 1278 * coal_delay_in_usec = 64000000 * register_value / t_clk_rate 1279 * 1280 * register_value = coal_delay_in_usec * t_clk_rate / 64000000 1281 * 1282 * In the ->set*() methods, we round the computed register value 1283 * to the nearest integer. 1284 */ 1285static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) 1286{ 1287 u32 val = rdlp(mp, SDMA_CONFIG); 1288 u64 temp; 1289 1290 if (mp->shared->extended_rx_coal_limit) 1291 temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); 1292 else 1293 temp = (val & 0x003fff00) >> 8; 1294 1295 temp *= 64000000; 1296 do_div(temp, mp->shared->t_clk); 1297 1298 return (unsigned int)temp; 1299} 1300 1301static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1302{ 1303 u64 temp; 1304 u32 val; 1305 1306 temp = (u64)usec * mp->shared->t_clk; 1307 temp += 31999999; 1308 do_div(temp, 64000000); 1309 1310 val = rdlp(mp, SDMA_CONFIG); 1311 if (mp->shared->extended_rx_coal_limit) { 1312 if (temp > 0xffff) 1313 temp = 0xffff; 1314 val &= ~0x023fff80; 1315 val |= (temp & 0x8000) << 10; 1316 val |= (temp & 0x7fff) << 7; 1317 } else { 1318 if (temp > 0x3fff) 1319 temp = 0x3fff; 1320 val &= ~0x003fff00; 1321 val |= (temp & 0x3fff) << 8; 1322 } 1323 wrlp(mp, SDMA_CONFIG, val); 1324} 1325 1326static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) 1327{ 1328 u64 temp; 1329 1330 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1331 temp *= 64000000; 1332 do_div(temp, mp->shared->t_clk); 1333 1334 return (unsigned int)temp; 1335} 1336 1337static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1338{ 1339 u64 temp; 1340 1341 temp = (u64)usec * mp->shared->t_clk; 1342 temp += 31999999; 1343 do_div(temp, 64000000); 1344 1345 if (temp > 0x3fff) 1346 temp = 0x3fff; 1347 1348 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); 1349} 1350 1351 1352/* ethtool ******************************************************************/ 1353struct mv643xx_eth_stats { 1354 char stat_string[ETH_GSTRING_LEN]; 1355 int sizeof_stat; 1356 int netdev_off; 1357 int mp_off; 1358}; 1359 1360#define SSTAT(m) \ 1361 { #m, FIELD_SIZEOF(struct net_device_stats, m), \ 1362 offsetof(struct net_device, stats.m), -1 } 1363 1364#define MIBSTAT(m) \ 1365 { #m, FIELD_SIZEOF(struct mib_counters, m), \ 1366 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } 1367 1368#define LROSTAT(m) \ 1369 { #m, FIELD_SIZEOF(struct lro_counters, m), \ 1370 -1, offsetof(struct mv643xx_eth_private, lro_counters.m) } 1371 1372static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { 1373 SSTAT(rx_packets), 1374 SSTAT(tx_packets), 1375 SSTAT(rx_bytes), 1376 SSTAT(tx_bytes), 1377 SSTAT(rx_errors), 1378 SSTAT(tx_errors), 1379 SSTAT(rx_dropped), 1380 SSTAT(tx_dropped), 1381 MIBSTAT(good_octets_received), 1382 MIBSTAT(bad_octets_received), 1383 MIBSTAT(internal_mac_transmit_err), 1384 MIBSTAT(good_frames_received), 1385 MIBSTAT(bad_frames_received), 1386 MIBSTAT(broadcast_frames_received), 1387 MIBSTAT(multicast_frames_received), 1388 MIBSTAT(frames_64_octets), 1389 MIBSTAT(frames_65_to_127_octets), 1390 MIBSTAT(frames_128_to_255_octets), 1391 MIBSTAT(frames_256_to_511_octets), 1392 MIBSTAT(frames_512_to_1023_octets), 1393 MIBSTAT(frames_1024_to_max_octets), 1394 MIBSTAT(good_octets_sent), 1395 MIBSTAT(good_frames_sent), 1396 MIBSTAT(excessive_collision), 1397 MIBSTAT(multicast_frames_sent), 1398 MIBSTAT(broadcast_frames_sent), 1399 MIBSTAT(unrec_mac_control_received), 1400 MIBSTAT(fc_sent), 1401 MIBSTAT(good_fc_received), 1402 MIBSTAT(bad_fc_received), 1403 MIBSTAT(undersize_received), 1404 MIBSTAT(fragments_received), 1405 MIBSTAT(oversize_received), 1406 MIBSTAT(jabber_received), 1407 MIBSTAT(mac_receive_error), 1408 MIBSTAT(bad_crc_event), 1409 MIBSTAT(collision), 1410 MIBSTAT(late_collision), 1411 LROSTAT(lro_aggregated), 1412 LROSTAT(lro_flushed), 1413 LROSTAT(lro_no_desc), 1414}; 1415 1416static int 1417mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, 1418 struct ethtool_cmd *cmd) 1419{ 1420 int err; 1421 1422 err = phy_read_status(mp->phy); 1423 if (err == 0) 1424 err = phy_ethtool_gset(mp->phy, cmd); 1425 1426 /* 1427 * The MAC does not support 1000baseT_Half. 1428 */ 1429 cmd->supported &= ~SUPPORTED_1000baseT_Half; 1430 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1431 1432 return err; 1433} 1434 1435static int 1436mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, 1437 struct ethtool_cmd *cmd) 1438{ 1439 u32 port_status; 1440 1441 port_status = rdlp(mp, PORT_STATUS); 1442 1443 cmd->supported = SUPPORTED_MII; 1444 cmd->advertising = ADVERTISED_MII; 1445 switch (port_status & PORT_SPEED_MASK) { 1446 case PORT_SPEED_10: 1447 cmd->speed = SPEED_10; 1448 break; 1449 case PORT_SPEED_100: 1450 cmd->speed = SPEED_100; 1451 break; 1452 case PORT_SPEED_1000: 1453 cmd->speed = SPEED_1000; 1454 break; 1455 default: 1456 cmd->speed = -1; 1457 break; 1458 } 1459 cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; 1460 cmd->port = PORT_MII; 1461 cmd->phy_address = 0; 1462 cmd->transceiver = XCVR_INTERNAL; 1463 cmd->autoneg = AUTONEG_DISABLE; 1464 cmd->maxtxpkt = 1; 1465 cmd->maxrxpkt = 1; 1466 1467 return 0; 1468} 1469 1470static int 1471mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1472{ 1473 struct mv643xx_eth_private *mp = netdev_priv(dev); 1474 1475 if (mp->phy != NULL) 1476 return mv643xx_eth_get_settings_phy(mp, cmd); 1477 else 1478 return mv643xx_eth_get_settings_phyless(mp, cmd); 1479} 1480 1481static int 1482mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1483{ 1484 struct mv643xx_eth_private *mp = netdev_priv(dev); 1485 1486 if (mp->phy == NULL) 1487 return -EINVAL; 1488 1489 /* 1490 * The MAC does not support 1000baseT_Half. 1491 */ 1492 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1493 1494 return phy_ethtool_sset(mp->phy, cmd); 1495} 1496 1497static void mv643xx_eth_get_drvinfo(struct net_device *dev, 1498 struct ethtool_drvinfo *drvinfo) 1499{ 1500 strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); 1501 strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); 1502 strncpy(drvinfo->fw_version, "N/A", 32); 1503 strncpy(drvinfo->bus_info, "platform", 32); 1504 drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); 1505} 1506 1507static int mv643xx_eth_nway_reset(struct net_device *dev) 1508{ 1509 struct mv643xx_eth_private *mp = netdev_priv(dev); 1510 1511 if (mp->phy == NULL) 1512 return -EINVAL; 1513 1514 return genphy_restart_aneg(mp->phy); 1515} 1516 1517static u32 mv643xx_eth_get_link(struct net_device *dev) 1518{ 1519 return !!netif_carrier_ok(dev); 1520} 1521 1522static int 1523mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1524{ 1525 struct mv643xx_eth_private *mp = netdev_priv(dev); 1526 1527 ec->rx_coalesce_usecs = get_rx_coal(mp); 1528 ec->tx_coalesce_usecs = get_tx_coal(mp); 1529 1530 return 0; 1531} 1532 1533static int 1534mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1535{ 1536 struct mv643xx_eth_private *mp = netdev_priv(dev); 1537 1538 set_rx_coal(mp, ec->rx_coalesce_usecs); 1539 set_tx_coal(mp, ec->tx_coalesce_usecs); 1540 1541 return 0; 1542} 1543 1544static void 1545mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1546{ 1547 struct mv643xx_eth_private *mp = netdev_priv(dev); 1548 1549 er->rx_max_pending = 4096; 1550 er->tx_max_pending = 4096; 1551 er->rx_mini_max_pending = 0; 1552 er->rx_jumbo_max_pending = 0; 1553 1554 er->rx_pending = mp->rx_ring_size; 1555 er->tx_pending = mp->tx_ring_size; 1556 er->rx_mini_pending = 0; 1557 er->rx_jumbo_pending = 0; 1558} 1559 1560static int 1561mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1562{ 1563 struct mv643xx_eth_private *mp = netdev_priv(dev); 1564 1565 if (er->rx_mini_pending || er->rx_jumbo_pending) 1566 return -EINVAL; 1567 1568 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1569 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; 1570 1571 if (netif_running(dev)) { 1572 mv643xx_eth_stop(dev); 1573 if (mv643xx_eth_open(dev)) { 1574 dev_printk(KERN_ERR, &dev->dev, 1575 "fatal error on re-opening device after " 1576 "ring param change\n"); 1577 return -ENOMEM; 1578 } 1579 } 1580 1581 return 0; 1582} 1583 1584static u32 1585mv643xx_eth_get_rx_csum(struct net_device *dev) 1586{ 1587 struct mv643xx_eth_private *mp = netdev_priv(dev); 1588 1589 return !!(rdlp(mp, PORT_CONFIG) & 0x02000000); 1590} 1591 1592static int 1593mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum) 1594{ 1595 struct mv643xx_eth_private *mp = netdev_priv(dev); 1596 1597 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); 1598 1599 return 0; 1600} 1601 1602static void mv643xx_eth_get_strings(struct net_device *dev, 1603 uint32_t stringset, uint8_t *data) 1604{ 1605 int i; 1606 1607 if (stringset == ETH_SS_STATS) { 1608 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1609 memcpy(data + i * ETH_GSTRING_LEN, 1610 mv643xx_eth_stats[i].stat_string, 1611 ETH_GSTRING_LEN); 1612 } 1613 } 1614} 1615 1616static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, 1617 struct ethtool_stats *stats, 1618 uint64_t *data) 1619{ 1620 struct mv643xx_eth_private *mp = netdev_priv(dev); 1621 int i; 1622 1623 mv643xx_eth_get_stats(dev); 1624 mib_counters_update(mp); 1625 mv643xx_eth_grab_lro_stats(mp); 1626 1627 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1628 const struct mv643xx_eth_stats *stat; 1629 void *p; 1630 1631 stat = mv643xx_eth_stats + i; 1632 1633 if (stat->netdev_off >= 0) 1634 p = ((void *)mp->dev) + stat->netdev_off; 1635 else 1636 p = ((void *)mp) + stat->mp_off; 1637 1638 data[i] = (stat->sizeof_stat == 8) ? 1639 *(uint64_t *)p : *(uint32_t *)p; 1640 } 1641} 1642 1643static int mv643xx_eth_set_flags(struct net_device *dev, u32 data) 1644{ 1645 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO); 1646} 1647 1648static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) 1649{ 1650 if (sset == ETH_SS_STATS) 1651 return ARRAY_SIZE(mv643xx_eth_stats); 1652 1653 return -EOPNOTSUPP; 1654} 1655 1656static const struct ethtool_ops mv643xx_eth_ethtool_ops = { 1657 .get_settings = mv643xx_eth_get_settings, 1658 .set_settings = mv643xx_eth_set_settings, 1659 .get_drvinfo = mv643xx_eth_get_drvinfo, 1660 .nway_reset = mv643xx_eth_nway_reset, 1661 .get_link = mv643xx_eth_get_link, 1662 .get_coalesce = mv643xx_eth_get_coalesce, 1663 .set_coalesce = mv643xx_eth_set_coalesce, 1664 .get_ringparam = mv643xx_eth_get_ringparam, 1665 .set_ringparam = mv643xx_eth_set_ringparam, 1666 .get_rx_csum = mv643xx_eth_get_rx_csum, 1667 .set_rx_csum = mv643xx_eth_set_rx_csum, 1668 .set_tx_csum = ethtool_op_set_tx_csum, 1669 .set_sg = ethtool_op_set_sg, 1670 .get_strings = mv643xx_eth_get_strings, 1671 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1672 .get_flags = ethtool_op_get_flags, 1673 .set_flags = mv643xx_eth_set_flags, 1674 .get_sset_count = mv643xx_eth_get_sset_count, 1675}; 1676 1677 1678/* address handling *********************************************************/ 1679static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) 1680{ 1681 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); 1682 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); 1683 1684 addr[0] = (mac_h >> 24) & 0xff; 1685 addr[1] = (mac_h >> 16) & 0xff; 1686 addr[2] = (mac_h >> 8) & 0xff; 1687 addr[3] = mac_h & 0xff; 1688 addr[4] = (mac_l >> 8) & 0xff; 1689 addr[5] = mac_l & 0xff; 1690} 1691 1692static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) 1693{ 1694 wrlp(mp, MAC_ADDR_HIGH, 1695 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); 1696 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); 1697} 1698 1699static u32 uc_addr_filter_mask(struct net_device *dev) 1700{ 1701 struct netdev_hw_addr *ha; 1702 u32 nibbles; 1703 1704 if (dev->flags & IFF_PROMISC) 1705 return 0; 1706 1707 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1708 netdev_for_each_uc_addr(ha, dev) { 1709 if (memcmp(dev->dev_addr, ha->addr, 5)) 1710 return 0; 1711 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1712 return 0; 1713 1714 nibbles |= 1 << (ha->addr[5] & 0x0f); 1715 } 1716 1717 return nibbles; 1718} 1719 1720static void mv643xx_eth_program_unicast_filter(struct net_device *dev) 1721{ 1722 struct mv643xx_eth_private *mp = netdev_priv(dev); 1723 u32 port_config; 1724 u32 nibbles; 1725 int i; 1726 1727 uc_addr_set(mp, dev->dev_addr); 1728 1729 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; 1730 1731 nibbles = uc_addr_filter_mask(dev); 1732 if (!nibbles) { 1733 port_config |= UNICAST_PROMISCUOUS_MODE; 1734 nibbles = 0xffff; 1735 } 1736 1737 for (i = 0; i < 16; i += 4) { 1738 int off = UNICAST_TABLE(mp->port_num) + i; 1739 u32 v; 1740 1741 v = 0; 1742 if (nibbles & 1) 1743 v |= 0x00000001; 1744 if (nibbles & 2) 1745 v |= 0x00000100; 1746 if (nibbles & 4) 1747 v |= 0x00010000; 1748 if (nibbles & 8) 1749 v |= 0x01000000; 1750 nibbles >>= 4; 1751 1752 wrl(mp, off, v); 1753 } 1754 1755 wrlp(mp, PORT_CONFIG, port_config); 1756} 1757 1758static int addr_crc(unsigned char *addr) 1759{ 1760 int crc = 0; 1761 int i; 1762 1763 for (i = 0; i < 6; i++) { 1764 int j; 1765 1766 crc = (crc ^ addr[i]) << 8; 1767 for (j = 7; j >= 0; j--) { 1768 if (crc & (0x100 << j)) 1769 crc ^= 0x107 << j; 1770 } 1771 } 1772 1773 return crc; 1774} 1775 1776static void mv643xx_eth_program_multicast_filter(struct net_device *dev) 1777{ 1778 struct mv643xx_eth_private *mp = netdev_priv(dev); 1779 u32 *mc_spec; 1780 u32 *mc_other; 1781 struct netdev_hw_addr *ha; 1782 int i; 1783 1784 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1785 int port_num; 1786 u32 accept; 1787 1788oom: 1789 port_num = mp->port_num; 1790 accept = 0x01010101; 1791 for (i = 0; i < 0x100; i += 4) { 1792 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); 1793 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); 1794 } 1795 return; 1796 } 1797 1798 mc_spec = kmalloc(0x200, GFP_ATOMIC); 1799 if (mc_spec == NULL) 1800 goto oom; 1801 mc_other = mc_spec + (0x100 >> 2); 1802 1803 memset(mc_spec, 0, 0x100); 1804 memset(mc_other, 0, 0x100); 1805 1806 netdev_for_each_mc_addr(ha, dev) { 1807 u8 *a = ha->addr; 1808 u32 *table; 1809 int entry; 1810 1811 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { 1812 table = mc_spec; 1813 entry = a[5]; 1814 } else { 1815 table = mc_other; 1816 entry = addr_crc(a); 1817 } 1818 1819 table[entry >> 2] |= 1 << (8 * (entry & 3)); 1820 } 1821 1822 for (i = 0; i < 0x100; i += 4) { 1823 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); 1824 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); 1825 } 1826 1827 kfree(mc_spec); 1828} 1829 1830static void mv643xx_eth_set_rx_mode(struct net_device *dev) 1831{ 1832 mv643xx_eth_program_unicast_filter(dev); 1833 mv643xx_eth_program_multicast_filter(dev); 1834} 1835 1836static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) 1837{ 1838 struct sockaddr *sa = addr; 1839 1840 if (!is_valid_ether_addr(sa->sa_data)) 1841 return -EINVAL; 1842 1843 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); 1844 1845 netif_addr_lock_bh(dev); 1846 mv643xx_eth_program_unicast_filter(dev); 1847 netif_addr_unlock_bh(dev); 1848 1849 return 0; 1850} 1851 1852 1853/* rx/tx queue initialisation ***********************************************/ 1854static int rxq_init(struct mv643xx_eth_private *mp, int index) 1855{ 1856 struct rx_queue *rxq = mp->rxq + index; 1857 struct rx_desc *rx_desc; 1858 int size; 1859 int i; 1860 1861 rxq->index = index; 1862 1863 rxq->rx_ring_size = mp->rx_ring_size; 1864 1865 rxq->rx_desc_count = 0; 1866 rxq->rx_curr_desc = 0; 1867 rxq->rx_used_desc = 0; 1868 1869 size = rxq->rx_ring_size * sizeof(struct rx_desc); 1870 1871 if (index == 0 && size <= mp->rx_desc_sram_size) { 1872 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1873 mp->rx_desc_sram_size); 1874 rxq->rx_desc_dma = mp->rx_desc_sram_addr; 1875 } else { 1876 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1877 size, &rxq->rx_desc_dma, 1878 GFP_KERNEL); 1879 } 1880 1881 if (rxq->rx_desc_area == NULL) { 1882 dev_printk(KERN_ERR, &mp->dev->dev, 1883 "can't allocate rx ring (%d bytes)\n", size); 1884 goto out; 1885 } 1886 memset(rxq->rx_desc_area, 0, size); 1887 1888 rxq->rx_desc_area_size = size; 1889 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1890 GFP_KERNEL); 1891 if (rxq->rx_skb == NULL) { 1892 dev_printk(KERN_ERR, &mp->dev->dev, 1893 "can't allocate rx skb ring\n"); 1894 goto out_free; 1895 } 1896 1897 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1898 for (i = 0; i < rxq->rx_ring_size; i++) { 1899 int nexti; 1900 1901 nexti = i + 1; 1902 if (nexti == rxq->rx_ring_size) 1903 nexti = 0; 1904 1905 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 1906 nexti * sizeof(struct rx_desc); 1907 } 1908 1909 rxq->lro_mgr.dev = mp->dev; 1910 memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats)); 1911 rxq->lro_mgr.features = LRO_F_NAPI; 1912 rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1913 rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1914 rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr); 1915 rxq->lro_mgr.max_aggr = 32; 1916 rxq->lro_mgr.frag_align_pad = 0; 1917 rxq->lro_mgr.lro_arr = rxq->lro_arr; 1918 rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header; 1919 1920 memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr)); 1921 1922 return 0; 1923 1924 1925out_free: 1926 if (index == 0 && size <= mp->rx_desc_sram_size) 1927 iounmap(rxq->rx_desc_area); 1928 else 1929 dma_free_coherent(mp->dev->dev.parent, size, 1930 rxq->rx_desc_area, 1931 rxq->rx_desc_dma); 1932 1933out: 1934 return -ENOMEM; 1935} 1936 1937static void rxq_deinit(struct rx_queue *rxq) 1938{ 1939 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 1940 int i; 1941 1942 rxq_disable(rxq); 1943 1944 for (i = 0; i < rxq->rx_ring_size; i++) { 1945 if (rxq->rx_skb[i]) { 1946 dev_kfree_skb(rxq->rx_skb[i]); 1947 rxq->rx_desc_count--; 1948 } 1949 } 1950 1951 if (rxq->rx_desc_count) { 1952 dev_printk(KERN_ERR, &mp->dev->dev, 1953 "error freeing rx ring -- %d skbs stuck\n", 1954 rxq->rx_desc_count); 1955 } 1956 1957 if (rxq->index == 0 && 1958 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 1959 iounmap(rxq->rx_desc_area); 1960 else 1961 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, 1962 rxq->rx_desc_area, rxq->rx_desc_dma); 1963 1964 kfree(rxq->rx_skb); 1965} 1966 1967static int txq_init(struct mv643xx_eth_private *mp, int index) 1968{ 1969 struct tx_queue *txq = mp->txq + index; 1970 struct tx_desc *tx_desc; 1971 int size; 1972 int i; 1973 1974 txq->index = index; 1975 1976 txq->tx_ring_size = mp->tx_ring_size; 1977 1978 txq->tx_desc_count = 0; 1979 txq->tx_curr_desc = 0; 1980 txq->tx_used_desc = 0; 1981 1982 size = txq->tx_ring_size * sizeof(struct tx_desc); 1983 1984 if (index == 0 && size <= mp->tx_desc_sram_size) { 1985 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 1986 mp->tx_desc_sram_size); 1987 txq->tx_desc_dma = mp->tx_desc_sram_addr; 1988 } else { 1989 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1990 size, &txq->tx_desc_dma, 1991 GFP_KERNEL); 1992 } 1993 1994 if (txq->tx_desc_area == NULL) { 1995 dev_printk(KERN_ERR, &mp->dev->dev, 1996 "can't allocate tx ring (%d bytes)\n", size); 1997 return -ENOMEM; 1998 } 1999 memset(txq->tx_desc_area, 0, size); 2000 2001 txq->tx_desc_area_size = size; 2002 2003 tx_desc = (struct tx_desc *)txq->tx_desc_area; 2004 for (i = 0; i < txq->tx_ring_size; i++) { 2005 struct tx_desc *txd = tx_desc + i; 2006 int nexti; 2007 2008 nexti = i + 1; 2009 if (nexti == txq->tx_ring_size) 2010 nexti = 0; 2011 2012 txd->cmd_sts = 0; 2013 txd->next_desc_ptr = txq->tx_desc_dma + 2014 nexti * sizeof(struct tx_desc); 2015 } 2016 2017 skb_queue_head_init(&txq->tx_skb); 2018 2019 return 0; 2020} 2021 2022static void txq_deinit(struct tx_queue *txq) 2023{ 2024 struct mv643xx_eth_private *mp = txq_to_mp(txq); 2025 2026 txq_disable(txq); 2027 txq_reclaim(txq, txq->tx_ring_size, 1); 2028 2029 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 2030 2031 if (txq->index == 0 && 2032 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 2033 iounmap(txq->tx_desc_area); 2034 else 2035 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2036 txq->tx_desc_area, txq->tx_desc_dma); 2037} 2038 2039 2040/* netdev ops and related ***************************************************/ 2041static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) 2042{ 2043 u32 int_cause; 2044 u32 int_cause_ext; 2045 2046 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; 2047 if (int_cause == 0) 2048 return 0; 2049 2050 int_cause_ext = 0; 2051 if (int_cause & INT_EXT) { 2052 int_cause &= ~INT_EXT; 2053 int_cause_ext = rdlp(mp, INT_CAUSE_EXT); 2054 } 2055 2056 if (int_cause) { 2057 wrlp(mp, INT_CAUSE, ~int_cause); 2058 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 2059 ~(rdlp(mp, TXQ_COMMAND) & 0xff); 2060 mp->work_rx |= (int_cause & INT_RX) >> 2; 2061 } 2062 2063 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 2064 if (int_cause_ext) { 2065 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); 2066 if (int_cause_ext & INT_EXT_LINK_PHY) 2067 mp->work_link = 1; 2068 mp->work_tx |= int_cause_ext & INT_EXT_TX; 2069 } 2070 2071 return 1; 2072} 2073 2074static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 2075{ 2076 struct net_device *dev = (struct net_device *)dev_id; 2077 struct mv643xx_eth_private *mp = netdev_priv(dev); 2078 2079 if (unlikely(!mv643xx_eth_collect_events(mp))) 2080 return IRQ_NONE; 2081 2082 wrlp(mp, INT_MASK, 0); 2083 napi_schedule(&mp->napi); 2084 2085 return IRQ_HANDLED; 2086} 2087 2088static void handle_link_event(struct mv643xx_eth_private *mp) 2089{ 2090 struct net_device *dev = mp->dev; 2091 u32 port_status; 2092 int speed; 2093 int duplex; 2094 int fc; 2095 2096 port_status = rdlp(mp, PORT_STATUS); 2097 if (!(port_status & LINK_UP)) { 2098 if (netif_carrier_ok(dev)) { 2099 int i; 2100 2101 printk(KERN_INFO "%s: link down\n", dev->name); 2102 2103 netif_carrier_off(dev); 2104 2105 for (i = 0; i < mp->txq_count; i++) { 2106 struct tx_queue *txq = mp->txq + i; 2107 2108 txq_reclaim(txq, txq->tx_ring_size, 1); 2109 txq_reset_hw_ptr(txq); 2110 } 2111 } 2112 return; 2113 } 2114 2115 switch (port_status & PORT_SPEED_MASK) { 2116 case PORT_SPEED_10: 2117 speed = 10; 2118 break; 2119 case PORT_SPEED_100: 2120 speed = 100; 2121 break; 2122 case PORT_SPEED_1000: 2123 speed = 1000; 2124 break; 2125 default: 2126 speed = -1; 2127 break; 2128 } 2129 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2130 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2131 2132 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " 2133 "flow control %sabled\n", dev->name, 2134 speed, duplex ? "full" : "half", 2135 fc ? "en" : "dis"); 2136 2137 if (!netif_carrier_ok(dev)) 2138 netif_carrier_on(dev); 2139} 2140 2141static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 2142{ 2143 struct mv643xx_eth_private *mp; 2144 int work_done; 2145 2146 mp = container_of(napi, struct mv643xx_eth_private, napi); 2147 2148 if (unlikely(mp->oom)) { 2149 mp->oom = 0; 2150 del_timer(&mp->rx_oom); 2151 } 2152 2153 work_done = 0; 2154 while (work_done < budget) { 2155 u8 queue_mask; 2156 int queue; 2157 int work_tbd; 2158 2159 if (mp->work_link) { 2160 mp->work_link = 0; 2161 handle_link_event(mp); 2162 work_done++; 2163 continue; 2164 } 2165 2166 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2167 if (likely(!mp->oom)) 2168 queue_mask |= mp->work_rx_refill; 2169 2170 if (!queue_mask) { 2171 if (mv643xx_eth_collect_events(mp)) 2172 continue; 2173 break; 2174 } 2175 2176 queue = fls(queue_mask) - 1; 2177 queue_mask = 1 << queue; 2178 2179 work_tbd = budget - work_done; 2180 if (work_tbd > 16) 2181 work_tbd = 16; 2182 2183 if (mp->work_tx_end & queue_mask) { 2184 txq_kick(mp->txq + queue); 2185 } else if (mp->work_tx & queue_mask) { 2186 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 2187 txq_maybe_wake(mp->txq + queue); 2188 } else if (mp->work_rx & queue_mask) { 2189 work_done += rxq_process(mp->rxq + queue, work_tbd); 2190 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2191 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2192 } else { 2193 BUG(); 2194 } 2195 } 2196 2197 if (work_done < budget) { 2198 if (mp->oom) 2199 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2200 napi_complete(napi); 2201 wrlp(mp, INT_MASK, mp->int_mask); 2202 } 2203 2204 return work_done; 2205} 2206 2207static inline void oom_timer_wrapper(unsigned long data) 2208{ 2209 struct mv643xx_eth_private *mp = (void *)data; 2210 2211 napi_schedule(&mp->napi); 2212} 2213 2214static void phy_reset(struct mv643xx_eth_private *mp) 2215{ 2216 int data; 2217 2218 data = phy_read(mp->phy, MII_BMCR); 2219 if (data < 0) 2220 return; 2221 2222 data |= BMCR_RESET; 2223 if (phy_write(mp->phy, MII_BMCR, data) < 0) 2224 return; 2225 2226 do { 2227 data = phy_read(mp->phy, MII_BMCR); 2228 } while (data >= 0 && data & BMCR_RESET); 2229} 2230 2231static void port_start(struct mv643xx_eth_private *mp) 2232{ 2233 u32 pscr; 2234 int i; 2235 2236 /* 2237 * Perform PHY reset, if there is a PHY. 2238 */ 2239 if (mp->phy != NULL) { 2240 struct ethtool_cmd cmd; 2241 2242 mv643xx_eth_get_settings(mp->dev, &cmd); 2243 phy_reset(mp); 2244 mv643xx_eth_set_settings(mp->dev, &cmd); 2245 } 2246 2247 /* 2248 * Configure basic link parameters. 2249 */ 2250 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2251 2252 pscr |= SERIAL_PORT_ENABLE; 2253 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2254 2255 pscr |= DO_NOT_FORCE_LINK_FAIL; 2256 if (mp->phy == NULL) 2257 pscr |= FORCE_LINK_PASS; 2258 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2259 2260 /* 2261 * Configure TX path and queues. 2262 */ 2263 tx_set_rate(mp, 1000000000, 16777216); 2264 for (i = 0; i < mp->txq_count; i++) { 2265 struct tx_queue *txq = mp->txq + i; 2266 2267 txq_reset_hw_ptr(txq); 2268 txq_set_rate(txq, 1000000000, 16777216); 2269 txq_set_fixed_prio_mode(txq); 2270 } 2271 2272 /* 2273 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2274 * frames to RX queue #0, and include the pseudo-header when 2275 * calculating receive checksums. 2276 */ 2277 wrlp(mp, PORT_CONFIG, 0x02000000); 2278 2279 /* 2280 * Treat BPDUs as normal multicasts, and disable partition mode. 2281 */ 2282 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); 2283 2284 /* 2285 * Add configured unicast addresses to address filter table. 2286 */ 2287 mv643xx_eth_program_unicast_filter(mp->dev); 2288 2289 /* 2290 * Enable the receive queues. 2291 */ 2292 for (i = 0; i < mp->rxq_count; i++) { 2293 struct rx_queue *rxq = mp->rxq + i; 2294 u32 addr; 2295 2296 addr = (u32)rxq->rx_desc_dma; 2297 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2298 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); 2299 2300 rxq_enable(rxq); 2301 } 2302} 2303 2304static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) 2305{ 2306 int skb_size; 2307 2308 /* 2309 * Reserve 2+14 bytes for an ethernet header (the hardware 2310 * automatically prepends 2 bytes of dummy data to each 2311 * received packet), 16 bytes for up to four VLAN tags, and 2312 * 4 bytes for the trailing FCS -- 36 bytes total. 2313 */ 2314 skb_size = mp->dev->mtu + 36; 2315 2316 /* 2317 * Make sure that the skb size is a multiple of 8 bytes, as 2318 * the lower three bits of the receive descriptor's buffer 2319 * size field are ignored by the hardware. 2320 */ 2321 mp->skb_size = (skb_size + 7) & ~7; 2322 2323 /* 2324 * If NET_SKB_PAD is smaller than a cache line, 2325 * netdev_alloc_skb() will cause skb->data to be misaligned 2326 * to a cache line boundary. If this is the case, include 2327 * some extra space to allow re-aligning the data area. 2328 */ 2329 mp->skb_size += SKB_DMA_REALIGN; 2330} 2331 2332static int mv643xx_eth_open(struct net_device *dev) 2333{ 2334 struct mv643xx_eth_private *mp = netdev_priv(dev); 2335 int err; 2336 int i; 2337 2338 wrlp(mp, INT_CAUSE, 0); 2339 wrlp(mp, INT_CAUSE_EXT, 0); 2340 rdlp(mp, INT_CAUSE_EXT); 2341 2342 err = request_irq(dev->irq, mv643xx_eth_irq, 2343 IRQF_SHARED, dev->name, dev); 2344 if (err) { 2345 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2346 return -EAGAIN; 2347 } 2348 2349 mv643xx_eth_recalc_skb_size(mp); 2350 2351 napi_enable(&mp->napi); 2352 2353 skb_queue_head_init(&mp->rx_recycle); 2354 2355 mp->int_mask = INT_EXT; 2356 2357 for (i = 0; i < mp->rxq_count; i++) { 2358 err = rxq_init(mp, i); 2359 if (err) { 2360 while (--i >= 0) 2361 rxq_deinit(mp->rxq + i); 2362 goto out; 2363 } 2364 2365 rxq_refill(mp->rxq + i, INT_MAX); 2366 mp->int_mask |= INT_RX_0 << i; 2367 } 2368 2369 if (mp->oom) { 2370 mp->rx_oom.expires = jiffies + (HZ / 10); 2371 add_timer(&mp->rx_oom); 2372 } 2373 2374 for (i = 0; i < mp->txq_count; i++) { 2375 err = txq_init(mp, i); 2376 if (err) { 2377 while (--i >= 0) 2378 txq_deinit(mp->txq + i); 2379 goto out_free; 2380 } 2381 mp->int_mask |= INT_TX_END_0 << i; 2382 } 2383 2384 port_start(mp); 2385 2386 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2387 wrlp(mp, INT_MASK, mp->int_mask); 2388 2389 return 0; 2390 2391 2392out_free: 2393 for (i = 0; i < mp->rxq_count; i++) 2394 rxq_deinit(mp->rxq + i); 2395out: 2396 free_irq(dev->irq, dev); 2397 2398 return err; 2399} 2400 2401static void port_reset(struct mv643xx_eth_private *mp) 2402{ 2403 unsigned int data; 2404 int i; 2405 2406 for (i = 0; i < mp->rxq_count; i++) 2407 rxq_disable(mp->rxq + i); 2408 for (i = 0; i < mp->txq_count; i++) 2409 txq_disable(mp->txq + i); 2410 2411 while (1) { 2412 u32 ps = rdlp(mp, PORT_STATUS); 2413 2414 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) 2415 break; 2416 udelay(10); 2417 } 2418 2419 /* Reset the Enable bit in the Configuration Register */ 2420 data = rdlp(mp, PORT_SERIAL_CONTROL); 2421 data &= ~(SERIAL_PORT_ENABLE | 2422 DO_NOT_FORCE_LINK_FAIL | 2423 FORCE_LINK_PASS); 2424 wrlp(mp, PORT_SERIAL_CONTROL, data); 2425} 2426 2427static int mv643xx_eth_stop(struct net_device *dev) 2428{ 2429 struct mv643xx_eth_private *mp = netdev_priv(dev); 2430 int i; 2431 2432 wrlp(mp, INT_MASK_EXT, 0x00000000); 2433 wrlp(mp, INT_MASK, 0x00000000); 2434 rdlp(mp, INT_MASK); 2435 2436 napi_disable(&mp->napi); 2437 2438 del_timer_sync(&mp->rx_oom); 2439 2440 netif_carrier_off(dev); 2441 2442 free_irq(dev->irq, dev); 2443 2444 port_reset(mp); 2445 mv643xx_eth_get_stats(dev); 2446 mib_counters_update(mp); 2447 del_timer_sync(&mp->mib_counters_timer); 2448 2449 skb_queue_purge(&mp->rx_recycle); 2450 2451 for (i = 0; i < mp->rxq_count; i++) 2452 rxq_deinit(mp->rxq + i); 2453 for (i = 0; i < mp->txq_count; i++) 2454 txq_deinit(mp->txq + i); 2455 2456 return 0; 2457} 2458 2459static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2460{ 2461 struct mv643xx_eth_private *mp = netdev_priv(dev); 2462 2463 if (mp->phy != NULL) 2464 return phy_mii_ioctl(mp->phy, ifr, cmd); 2465 2466 return -EOPNOTSUPP; 2467} 2468 2469static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2470{ 2471 struct mv643xx_eth_private *mp = netdev_priv(dev); 2472 2473 if (new_mtu < 64 || new_mtu > 9500) 2474 return -EINVAL; 2475 2476 dev->mtu = new_mtu; 2477 mv643xx_eth_recalc_skb_size(mp); 2478 tx_set_rate(mp, 1000000000, 16777216); 2479 2480 if (!netif_running(dev)) 2481 return 0; 2482 2483 /* 2484 * Stop and then re-open the interface. This will allocate RX 2485 * skbs of the new MTU. 2486 * There is a possible danger that the open will not succeed, 2487 * due to memory being full. 2488 */ 2489 mv643xx_eth_stop(dev); 2490 if (mv643xx_eth_open(dev)) { 2491 dev_printk(KERN_ERR, &dev->dev, 2492 "fatal error on re-opening device after " 2493 "MTU change\n"); 2494 } 2495 2496 return 0; 2497} 2498 2499static void tx_timeout_task(struct work_struct *ugly) 2500{ 2501 struct mv643xx_eth_private *mp; 2502 2503 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2504 if (netif_running(mp->dev)) { 2505 netif_tx_stop_all_queues(mp->dev); 2506 port_reset(mp); 2507 port_start(mp); 2508 netif_tx_wake_all_queues(mp->dev); 2509 } 2510} 2511 2512static void mv643xx_eth_tx_timeout(struct net_device *dev) 2513{ 2514 struct mv643xx_eth_private *mp = netdev_priv(dev); 2515 2516 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2517 2518 schedule_work(&mp->tx_timeout_task); 2519} 2520 2521#ifdef CONFIG_NET_POLL_CONTROLLER 2522static void mv643xx_eth_netpoll(struct net_device *dev) 2523{ 2524 struct mv643xx_eth_private *mp = netdev_priv(dev); 2525 2526 wrlp(mp, INT_MASK, 0x00000000); 2527 rdlp(mp, INT_MASK); 2528 2529 mv643xx_eth_irq(dev->irq, dev); 2530 2531 wrlp(mp, INT_MASK, mp->int_mask); 2532} 2533#endif 2534 2535 2536/* platform glue ************************************************************/ 2537static void 2538mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, 2539 struct mbus_dram_target_info *dram) 2540{ 2541 void __iomem *base = msp->base; 2542 u32 win_enable; 2543 u32 win_protect; 2544 int i; 2545 2546 for (i = 0; i < 6; i++) { 2547 writel(0, base + WINDOW_BASE(i)); 2548 writel(0, base + WINDOW_SIZE(i)); 2549 if (i < 4) 2550 writel(0, base + WINDOW_REMAP_HIGH(i)); 2551 } 2552 2553 win_enable = 0x3f; 2554 win_protect = 0; 2555 2556 for (i = 0; i < dram->num_cs; i++) { 2557 struct mbus_dram_window *cs = dram->cs + i; 2558 2559 writel((cs->base & 0xffff0000) | 2560 (cs->mbus_attr << 8) | 2561 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 2562 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 2563 2564 win_enable &= ~(1 << i); 2565 win_protect |= 3 << (2 * i); 2566 } 2567 2568 writel(win_enable, base + WINDOW_BAR_ENABLE); 2569 msp->win_protect = win_protect; 2570} 2571 2572static void infer_hw_params(struct mv643xx_eth_shared_private *msp) 2573{ 2574 /* 2575 * Check whether we have a 14-bit coal limit field in bits 2576 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2577 * SDMA config register. 2578 */ 2579 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); 2580 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) 2581 msp->extended_rx_coal_limit = 1; 2582 else 2583 msp->extended_rx_coal_limit = 0; 2584 2585 /* 2586 * Check whether the MAC supports TX rate control, and if 2587 * yes, whether its associated registers are in the old or 2588 * the new place. 2589 */ 2590 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); 2591 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { 2592 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2593 } else { 2594 writel(7, msp->base + 0x0400 + TX_BW_RATE); 2595 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) 2596 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2597 else 2598 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2599 } 2600} 2601 2602static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2603{ 2604 static int mv643xx_eth_version_printed; 2605 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2606 struct mv643xx_eth_shared_private *msp; 2607 struct resource *res; 2608 int ret; 2609 2610 if (!mv643xx_eth_version_printed++) 2611 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2612 "driver version %s\n", mv643xx_eth_driver_version); 2613 2614 ret = -EINVAL; 2615 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2616 if (res == NULL) 2617 goto out; 2618 2619 ret = -ENOMEM; 2620 msp = kzalloc(sizeof(*msp), GFP_KERNEL); 2621 if (msp == NULL) 2622 goto out; 2623 2624 msp->base = ioremap(res->start, res->end - res->start + 1); 2625 if (msp->base == NULL) 2626 goto out_free; 2627 2628 /* 2629 * Set up and register SMI bus. 2630 */ 2631 if (pd == NULL || pd->shared_smi == NULL) { 2632 msp->smi_bus = mdiobus_alloc(); 2633 if (msp->smi_bus == NULL) 2634 goto out_unmap; 2635 2636 msp->smi_bus->priv = msp; 2637 msp->smi_bus->name = "mv643xx_eth smi"; 2638 msp->smi_bus->read = smi_bus_read; 2639 msp->smi_bus->write = smi_bus_write, 2640 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); 2641 msp->smi_bus->parent = &pdev->dev; 2642 msp->smi_bus->phy_mask = 0xffffffff; 2643 if (mdiobus_register(msp->smi_bus) < 0) 2644 goto out_free_mii_bus; 2645 msp->smi = msp; 2646 } else { 2647 msp->smi = platform_get_drvdata(pd->shared_smi); 2648 } 2649 2650 msp->err_interrupt = NO_IRQ; 2651 init_waitqueue_head(&msp->smi_busy_wait); 2652 2653 /* 2654 * Check whether the error interrupt is hooked up. 2655 */ 2656 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2657 if (res != NULL) { 2658 int err; 2659 2660 err = request_irq(res->start, mv643xx_eth_err_irq, 2661 IRQF_SHARED, "mv643xx_eth", msp); 2662 if (!err) { 2663 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); 2664 msp->err_interrupt = res->start; 2665 } 2666 } 2667 2668 /* 2669 * (Re-)program MBUS remapping windows if we are asked to. 2670 */ 2671 if (pd != NULL && pd->dram != NULL) 2672 mv643xx_eth_conf_mbus_windows(msp, pd->dram); 2673 2674 /* 2675 * Detect hardware parameters. 2676 */ 2677 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; 2678 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2679 pd->tx_csum_limit : 9 * 1024; 2680 infer_hw_params(msp); 2681 2682 platform_set_drvdata(pdev, msp); 2683 2684 return 0; 2685 2686out_free_mii_bus: 2687 mdiobus_free(msp->smi_bus); 2688out_unmap: 2689 iounmap(msp->base); 2690out_free: 2691 kfree(msp); 2692out: 2693 return ret; 2694} 2695 2696static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2697{ 2698 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2699 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2700 2701 if (pd == NULL || pd->shared_smi == NULL) { 2702 mdiobus_unregister(msp->smi_bus); 2703 mdiobus_free(msp->smi_bus); 2704 } 2705 if (msp->err_interrupt != NO_IRQ) 2706 free_irq(msp->err_interrupt, msp); 2707 iounmap(msp->base); 2708 kfree(msp); 2709 2710 return 0; 2711} 2712 2713static struct platform_driver mv643xx_eth_shared_driver = { 2714 .probe = mv643xx_eth_shared_probe, 2715 .remove = mv643xx_eth_shared_remove, 2716 .driver = { 2717 .name = MV643XX_ETH_SHARED_NAME, 2718 .owner = THIS_MODULE, 2719 }, 2720}; 2721 2722static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) 2723{ 2724 int addr_shift = 5 * mp->port_num; 2725 u32 data; 2726 2727 data = rdl(mp, PHY_ADDR); 2728 data &= ~(0x1f << addr_shift); 2729 data |= (phy_addr & 0x1f) << addr_shift; 2730 wrl(mp, PHY_ADDR, data); 2731} 2732 2733static int phy_addr_get(struct mv643xx_eth_private *mp) 2734{ 2735 unsigned int data; 2736 2737 data = rdl(mp, PHY_ADDR); 2738 2739 return (data >> (5 * mp->port_num)) & 0x1f; 2740} 2741 2742static void set_params(struct mv643xx_eth_private *mp, 2743 struct mv643xx_eth_platform_data *pd) 2744{ 2745 struct net_device *dev = mp->dev; 2746 2747 if (is_valid_ether_addr(pd->mac_addr)) 2748 memcpy(dev->dev_addr, pd->mac_addr, 6); 2749 else 2750 uc_addr_get(mp, dev->dev_addr); 2751 2752 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2753 if (pd->rx_queue_size) 2754 mp->rx_ring_size = pd->rx_queue_size; 2755 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2756 mp->rx_desc_sram_size = pd->rx_sram_size; 2757 2758 mp->rxq_count = pd->rx_queue_count ? : 1; 2759 2760 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2761 if (pd->tx_queue_size) 2762 mp->tx_ring_size = pd->tx_queue_size; 2763 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2764 mp->tx_desc_sram_size = pd->tx_sram_size; 2765 2766 mp->txq_count = pd->tx_queue_count ? : 1; 2767} 2768 2769static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2770 int phy_addr) 2771{ 2772 struct mii_bus *bus = mp->shared->smi->smi_bus; 2773 struct phy_device *phydev; 2774 int start; 2775 int num; 2776 int i; 2777 2778 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2779 start = phy_addr_get(mp) & 0x1f; 2780 num = 32; 2781 } else { 2782 start = phy_addr & 0x1f; 2783 num = 1; 2784 } 2785 2786 phydev = NULL; 2787 for (i = 0; i < num; i++) { 2788 int addr = (start + i) & 0x1f; 2789 2790 if (bus->phy_map[addr] == NULL) 2791 mdiobus_scan(bus, addr); 2792 2793 if (phydev == NULL) { 2794 phydev = bus->phy_map[addr]; 2795 if (phydev != NULL) 2796 phy_addr_set(mp, addr); 2797 } 2798 } 2799 2800 return phydev; 2801} 2802 2803static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) 2804{ 2805 struct phy_device *phy = mp->phy; 2806 2807 phy_reset(mp); 2808 2809 phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); 2810 2811 if (speed == 0) { 2812 phy->autoneg = AUTONEG_ENABLE; 2813 phy->speed = 0; 2814 phy->duplex = 0; 2815 phy->advertising = phy->supported | ADVERTISED_Autoneg; 2816 } else { 2817 phy->autoneg = AUTONEG_DISABLE; 2818 phy->advertising = 0; 2819 phy->speed = speed; 2820 phy->duplex = duplex; 2821 } 2822 phy_start_aneg(phy); 2823} 2824 2825static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 2826{ 2827 u32 pscr; 2828 2829 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2830 if (pscr & SERIAL_PORT_ENABLE) { 2831 pscr &= ~SERIAL_PORT_ENABLE; 2832 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2833 } 2834 2835 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2836 if (mp->phy == NULL) { 2837 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2838 if (speed == SPEED_1000) 2839 pscr |= SET_GMII_SPEED_TO_1000; 2840 else if (speed == SPEED_100) 2841 pscr |= SET_MII_SPEED_TO_100; 2842 2843 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; 2844 2845 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; 2846 if (duplex == DUPLEX_FULL) 2847 pscr |= SET_FULL_DUPLEX_MODE; 2848 } 2849 2850 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2851} 2852 2853static const struct net_device_ops mv643xx_eth_netdev_ops = { 2854 .ndo_open = mv643xx_eth_open, 2855 .ndo_stop = mv643xx_eth_stop, 2856 .ndo_start_xmit = mv643xx_eth_xmit, 2857 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, 2858 .ndo_set_mac_address = mv643xx_eth_set_mac_address, 2859 .ndo_validate_addr = eth_validate_addr, 2860 .ndo_do_ioctl = mv643xx_eth_ioctl, 2861 .ndo_change_mtu = mv643xx_eth_change_mtu, 2862 .ndo_tx_timeout = mv643xx_eth_tx_timeout, 2863 .ndo_get_stats = mv643xx_eth_get_stats, 2864#ifdef CONFIG_NET_POLL_CONTROLLER 2865 .ndo_poll_controller = mv643xx_eth_netpoll, 2866#endif 2867}; 2868 2869static int mv643xx_eth_probe(struct platform_device *pdev) 2870{ 2871 struct mv643xx_eth_platform_data *pd; 2872 struct mv643xx_eth_private *mp; 2873 struct net_device *dev; 2874 struct resource *res; 2875 int err; 2876 2877 pd = pdev->dev.platform_data; 2878 if (pd == NULL) { 2879 dev_printk(KERN_ERR, &pdev->dev, 2880 "no mv643xx_eth_platform_data\n"); 2881 return -ENODEV; 2882 } 2883 2884 if (pd->shared == NULL) { 2885 dev_printk(KERN_ERR, &pdev->dev, 2886 "no mv643xx_eth_platform_data->shared\n"); 2887 return -ENODEV; 2888 } 2889 2890 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); 2891 if (!dev) 2892 return -ENOMEM; 2893 2894 mp = netdev_priv(dev); 2895 platform_set_drvdata(pdev, mp); 2896 2897 mp->shared = platform_get_drvdata(pd->shared); 2898 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); 2899 mp->port_num = pd->port_number; 2900 2901 mp->dev = dev; 2902 2903 set_params(mp, pd); 2904 dev->real_num_tx_queues = mp->txq_count; 2905 2906 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2907 mp->phy = phy_scan(mp, pd->phy_addr); 2908 2909 if (mp->phy != NULL) 2910 phy_init(mp, pd->speed, pd->duplex); 2911 2912 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2913 2914 init_pscr(mp, pd->speed, pd->duplex); 2915 2916 2917 mib_counters_clear(mp); 2918 2919 init_timer(&mp->mib_counters_timer); 2920 mp->mib_counters_timer.data = (unsigned long)mp; 2921 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2922 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2923 add_timer(&mp->mib_counters_timer); 2924 2925 spin_lock_init(&mp->mib_counters_lock); 2926 2927 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2928 2929 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); 2930 2931 init_timer(&mp->rx_oom); 2932 mp->rx_oom.data = (unsigned long)mp; 2933 mp->rx_oom.function = oom_timer_wrapper; 2934 2935 2936 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2937 BUG_ON(!res); 2938 dev->irq = res->start; 2939 2940 dev->netdev_ops = &mv643xx_eth_netdev_ops; 2941 2942 dev->watchdog_timeo = 2 * HZ; 2943 dev->base_addr = 0; 2944 2945 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2946 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2947 2948 SET_NETDEV_DEV(dev, &pdev->dev); 2949 2950 if (mp->shared->win_protect) 2951 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); 2952 2953 netif_carrier_off(dev); 2954 2955 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); 2956 2957 set_rx_coal(mp, 250); 2958 set_tx_coal(mp, 0); 2959 2960 err = register_netdev(dev); 2961 if (err) 2962 goto out; 2963 2964 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2965 mp->port_num, dev->dev_addr); 2966 2967 if (mp->tx_desc_sram_size > 0) 2968 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2969 2970 return 0; 2971 2972out: 2973 free_netdev(dev); 2974 2975 return err; 2976} 2977 2978static int mv643xx_eth_remove(struct platform_device *pdev) 2979{ 2980 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2981 2982 unregister_netdev(mp->dev); 2983 if (mp->phy != NULL) 2984 phy_detach(mp->phy); 2985 flush_scheduled_work(); 2986 free_netdev(mp->dev); 2987 2988 platform_set_drvdata(pdev, NULL); 2989 2990 return 0; 2991} 2992 2993static void mv643xx_eth_shutdown(struct platform_device *pdev) 2994{ 2995 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2996 2997 /* Mask all interrupts on ethernet port */ 2998 wrlp(mp, INT_MASK, 0); 2999 rdlp(mp, INT_MASK); 3000 3001 if (netif_running(mp->dev)) 3002 port_reset(mp); 3003} 3004 3005static struct platform_driver mv643xx_eth_driver = { 3006 .probe = mv643xx_eth_probe, 3007 .remove = mv643xx_eth_remove, 3008 .shutdown = mv643xx_eth_shutdown, 3009 .driver = { 3010 .name = MV643XX_ETH_NAME, 3011 .owner = THIS_MODULE, 3012 }, 3013}; 3014 3015static int __init mv643xx_eth_init_module(void) 3016{ 3017 int rc; 3018 3019 rc = platform_driver_register(&mv643xx_eth_shared_driver); 3020 if (!rc) { 3021 rc = platform_driver_register(&mv643xx_eth_driver); 3022 if (rc) 3023 platform_driver_unregister(&mv643xx_eth_shared_driver); 3024 } 3025 3026 return rc; 3027} 3028module_init(mv643xx_eth_init_module); 3029 3030static void __exit mv643xx_eth_cleanup_module(void) 3031{ 3032 platform_driver_unregister(&mv643xx_eth_driver); 3033 platform_driver_unregister(&mv643xx_eth_shared_driver); 3034} 3035module_exit(mv643xx_eth_cleanup_module); 3036 3037MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " 3038 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); 3039MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 3040MODULE_LICENSE("GPL"); 3041MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); 3042MODULE_ALIAS("platform:" MV643XX_ETH_NAME); 3043